Update vendor of Buildah and imagebuilder

Fixes the testing issues we are hitting.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Author: Daniel J Walsh
Date:   2019-03-13 08:01:28 -04:00
Parent: 8b637bd78c
Commit: adad93342c

16 changed files with 118 additions and 59 deletions

@@ -1,46 +0,0 @@
// Package command contains the set of Dockerfile commands.
package command
// Define constants for the command strings
const (
Add = "add"
Arg = "arg"
Cmd = "cmd"
Copy = "copy"
Entrypoint = "entrypoint"
Env = "env"
Expose = "expose"
From = "from"
Healthcheck = "healthcheck"
Label = "label"
Maintainer = "maintainer"
Onbuild = "onbuild"
Run = "run"
Shell = "shell"
StopSignal = "stopsignal"
User = "user"
Volume = "volume"
Workdir = "workdir"
)
// Commands is the set of all Dockerfile commands
var Commands = map[string]struct{}{
Add: {},
Arg: {},
Cmd: {},
Copy: {},
Entrypoint: {},
Env: {},
Expose: {},
From: {},
Healthcheck: {},
Label: {},
Maintainer: {},
Onbuild: {},
Run: {},
Shell: {},
StopSignal: {},
User: {},
Volume: {},
Workdir: {},
}
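As context for the removal, here is a minimal sketch of how callers typically consumed the vendored command package: Commands is keyed by lower-cased instruction names, so lookups normalize the token first. The isKnownInstruction helper is ours, for illustration only.

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/builder/dockerfile/command"
)

// isKnownInstruction reports whether token names a Dockerfile instruction
// by looking up its lower-cased form in the command.Commands set.
func isKnownInstruction(token string) bool {
    _, ok := command.Commands[strings.ToLower(token)]
    return ok
}

func main() {
    fmt.Println(isKnownInstruction("RUN"))     // true
    fmt.Println(isKnownInstruction("INCLUDE")) // false: not a Dockerfile instruction
}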

@@ -1,399 +0,0 @@
package parser
// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"unicode"
"unicode/utf8"
"github.com/docker/docker/builder/dockerfile/command"
)
var (
errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
)
const (
commandLabel = "LABEL"
)
// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
return &Node{}, nil, nil
}
// used for onbuild. Could potentially be used for anything that represents a
// statement with sub-statements.
//
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
//
func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
child, err := newNodeFromLine(rest, d)
if err != nil {
return nil, nil, err
}
return &Node{Children: []*Node{child}}, nil, nil
}
// helper to parse words (i.e. space-delimited or quoted strings) in a statement.
// The quotes are preserved as part of this function and they are stripped later
// as part of processWords().
func parseWords(rest string, d *Directive) []string {
const (
inSpaces = iota // looking for start of a word
inWord
inQuote
)
words := []string{}
phase := inSpaces
word := ""
quote := '\000'
blankOK := false
var ch rune
var chWidth int
for pos := 0; pos <= len(rest); pos += chWidth {
if pos != len(rest) {
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
if phase == inSpaces { // Looking for start of word
if pos == len(rest) { // end of input
break
}
if unicode.IsSpace(ch) { // skip spaces
continue
}
phase = inWord // found it, fall through
}
if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
if blankOK || len(word) > 0 {
words = append(words, word)
}
break
}
if phase == inWord {
if unicode.IsSpace(ch) {
phase = inSpaces
if blankOK || len(word) > 0 {
words = append(words, word)
}
word = ""
blankOK = false
continue
}
if ch == '\'' || ch == '"' {
quote = ch
blankOK = true
phase = inQuote
}
if ch == d.escapeToken {
if pos+chWidth == len(rest) {
continue // just skip an escape token at end of line
}
// If we're not quoted and we see an escape token, then always just
// add the escape token plus the char to the word, even if the char
// is a quote.
word += string(ch)
pos += chWidth
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
word += string(ch)
continue
}
if phase == inQuote {
if ch == quote {
phase = inWord
}
// The escape token is special except for ' quotes - can't escape anything for '
if ch == d.escapeToken && quote != '\'' {
if pos+chWidth == len(rest) {
phase = inWord
continue // just skip the escape token at end
}
pos += chWidth
word += string(ch)
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
word += string(ch)
}
}
return words
}
// parse environment like statements. Note that this does *not* handle
// variable interpolation, which will be handled in the evaluator.
func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
// This is kind of tricky because we need to support the old
// variant: KEY name value
// as well as the new one: KEY name=value ...
// The trigger to know which one is being used will be whether we hit
// a space or = first. space ==> old, "=" ==> new
words := parseWords(rest, d)
if len(words) == 0 {
return nil, nil
}
// Old format (KEY name value)
if !strings.Contains(words[0], "=") {
parts := tokenWhitespace.Split(rest, 2)
if len(parts) < 2 {
return nil, fmt.Errorf(key + " must have two arguments")
}
return newKeyValueNode(parts[0], parts[1]), nil
}
var rootNode *Node
var prevNode *Node
for _, word := range words {
if !strings.Contains(word, "=") {
return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
}
parts := strings.SplitN(word, "=", 2)
node := newKeyValueNode(parts[0], parts[1])
rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
}
return rootNode, nil
}
func newKeyValueNode(key, value string) *Node {
return &Node{
Value: key,
Next: &Node{Value: value},
}
}
func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
if rootNode == nil {
rootNode = node
}
if prevNode != nil {
prevNode.Next = node
}
prevNode = node.Next
return rootNode, prevNode
}
func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
node, err := parseNameVal(rest, "ENV", d)
return node, nil, err
}
func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
node, err := parseNameVal(rest, commandLabel, d)
return node, nil, err
}
// NodeFromLabels returns a Node for the injected labels
func NodeFromLabels(labels map[string]string) *Node {
keys := []string{}
for key := range labels {
keys = append(keys, key)
}
// Sort the labels to have a repeatable order
sort.Strings(keys)
labelPairs := []string{}
var rootNode *Node
var prevNode *Node
for _, key := range keys {
value := labels[key]
labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
// Value must be single quoted to prevent env variable expansion
// See https://github.com/docker/docker/issues/26027
node := newKeyValueNode(key, "'"+value+"'")
rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
}
return &Node{
Value: command.Label,
Original: commandLabel + " " + strings.Join(labelPairs, " "),
Next: rootNode,
}
}
// parses a statement containing one or more keyword definition(s) and/or
// value assignments, like `name1 name2= name3="" name4=value`.
// Note that this is a stricter format than the old format of assignment,
// allowed by parseNameVal(), in a way that this only allows assignment of the
// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
// In addition, a keyword definition alone is of the form `keyword` like `name1`
// above. And the assignments `name2=` and `name3=""` are equivalent and
// assign an empty value to the respective keywords.
func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
words := parseWords(rest, d)
if len(words) == 0 {
return nil, nil, nil
}
var (
rootnode *Node
prevNode *Node
)
for i, word := range words {
node := &Node{}
node.Value = word
if i == 0 {
rootnode = node
} else {
prevNode.Next = node
}
prevNode = node
}
return rootnode, nil, nil
}
// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
node := &Node{}
rootnode := node
prevnode := node
for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
prevnode = node
node.Value = str
node.Next = &Node{}
node = node.Next
}
// XXX to get around regexp.Split *always* providing an empty string at the
// end due to how our loop is constructed, nil out the last node in the
// chain.
prevnode.Next = nil
return rootnode, nil, nil
}
// parseString just wraps the string in quotes and returns a working node.
func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
n := &Node{}
n.Value = rest
return n, nil, nil
}
// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
if !strings.HasPrefix(rest, "[") {
return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
}
var myJSON []interface{}
if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
return nil, nil, err
}
var top, prev *Node
for _, str := range myJSON {
s, ok := str.(string)
if !ok {
return nil, nil, errDockerfileNotStringArray
}
node := &Node{Value: s}
if prev == nil {
top = node
} else {
prev.Next = node
}
prev = node
}
return top, map[string]bool{"json": true}, nil
}
// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, quotes the result and returns a single
// node.
func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
node, attrs, err := parseJSON(rest, d)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileNotStringArray {
return nil, nil, err
}
node = &Node{}
node.Value = rest
return node, nil, nil
}
// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
// delimited string.
func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
node, attrs, err := parseJSON(rest, d)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileNotStringArray {
return nil, nil, err
}
return parseStringsWhitespaceDelimited(rest, d)
}
// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
// Find end of first argument
var sep int
for ; sep < len(rest); sep++ {
if unicode.IsSpace(rune(rest[sep])) {
break
}
}
next := sep
for ; next < len(rest); next++ {
if !unicode.IsSpace(rune(rest[next])) {
break
}
}
if sep == 0 {
return nil, nil, nil
}
typ := rest[:sep]
cmd, attrs, err := parseMaybeJSON(rest[next:], d)
if err != nil {
return nil, nil, err
}
return &Node{Value: typ, Next: cmd}, attrs, err
}
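To make the line parsers above concrete, here is a hedged sketch (not part of this diff) that feeds the JSON and shell forms of CMD through the package's exported Parse entry point, defined in the next file, and dumps the resulting nodes; the outputs noted in comments are what we expect, not captured logs.

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
    // JSON (exec) form: parseJSON creates one node per array element and
    // marks the instruction node with the "json" attribute.
    res, err := parser.Parse(strings.NewReader(`CMD ["ls", "-l"]`))
    if err != nil {
        panic(err)
    }
    cmd := res.AST.Children[0]
    fmt.Println(cmd.Dump())             // cmd "ls" "-l"
    fmt.Println(cmd.Attributes["json"]) // true

    // Shell form: parseMaybeJSON falls back to a single node holding the
    // raw argument string, and no "json" attribute is set.
    res, err = parser.Parse(strings.NewReader(`CMD ls -l`))
    if err != nil {
        panic(err)
    }
    fmt.Println(res.AST.Children[0].Dump()) // cmd "ls -l"
}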

@@ -1,360 +0,0 @@
// Package parser implements a parser and parse tree dumper for Dockerfiles.
package parser
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"runtime"
"strconv"
"strings"
"unicode"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
)
// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
Value string // actual content
Next *Node // the next item in the current sexp
Children []*Node // the children of this sexp
Attributes map[string]bool // special attributes for this node
Original string // original line used before parsing
Flags []string // only top Node should have this set
StartLine int // the line in the original dockerfile where the node begins
endLine int // the line in the original dockerfile where the node ends
}
// Dump dumps the AST defined by `node` as a list of sexps.
// Returns a string suitable for printing.
func (node *Node) Dump() string {
str := ""
str += node.Value
if len(node.Flags) > 0 {
str += fmt.Sprintf(" %q", node.Flags)
}
for _, n := range node.Children {
str += "(" + n.Dump() + ")\n"
}
for n := node.Next; n != nil; n = n.Next {
if len(n.Children) > 0 {
str += " " + n.Dump()
} else {
str += " " + strconv.Quote(n.Value)
}
}
return strings.TrimSpace(str)
}
func (node *Node) lines(start, end int) {
node.StartLine = start
node.endLine = end
}
// AddChild adds a new child node, and updates line information
func (node *Node) AddChild(child *Node, startLine, endLine int) {
child.lines(startLine, endLine)
if node.StartLine < 0 {
node.StartLine = startLine
}
node.endLine = endLine
node.Children = append(node.Children, child)
}
var (
dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
tokenComment = regexp.MustCompile(`^#.*$`)
)
// DefaultEscapeToken is the default escape token
const DefaultEscapeToken = '\\'
// Directive is the structure used during a build run to hold the state of
// parsing directives.
type Directive struct {
escapeToken rune // Current escape token
platformToken string // Current platform token
lineContinuationRegex *regexp.Regexp // Current line continuation regex
processingComplete bool // Whether we are done looking for directives
escapeSeen bool // Whether the escape directive has been seen
platformSeen bool // Whether the platform directive has been seen
}
// setEscapeToken sets the default token for escaping characters in a Dockerfile.
func (d *Directive) setEscapeToken(s string) error {
if s != "`" && s != "\\" {
return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
}
d.escapeToken = rune(s[0])
d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
return nil
}
// setPlatformToken sets the default platform for pulling images in a Dockerfile.
func (d *Directive) setPlatformToken(s string) error {
s = strings.ToLower(s)
valid := []string{runtime.GOOS}
if system.LCOWSupported() {
valid = append(valid, "linux")
}
for _, item := range valid {
if s == item {
d.platformToken = s
return nil
}
}
return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
}
// possibleParserDirective looks for one or more parser directives '# escapeToken=<char>' and
// '# platform=<string>'. Parser directives must precede any builder instruction
// or other comments, and cannot be repeated.
func (d *Directive) possibleParserDirective(line string) error {
if d.processingComplete {
return nil
}
tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
if len(tecMatch) != 0 {
for i, n := range tokenEscapeCommand.SubexpNames() {
if n == "escapechar" {
if d.escapeSeen {
return errors.New("only one escape parser directive can be used")
}
d.escapeSeen = true
return d.setEscapeToken(tecMatch[i])
}
}
}
// Only recognise a platform token if LCOW is supported
if system.LCOWSupported() {
tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
if len(tpcMatch) != 0 {
for i, n := range tokenPlatformCommand.SubexpNames() {
if n == "platform" {
if d.platformSeen {
return errors.New("only one platform parser directive can be used")
}
d.platformSeen = true
return d.setPlatformToken(tpcMatch[i])
}
}
}
}
d.processingComplete = true
return nil
}
// NewDefaultDirective returns a new Directive with the default escapeToken token
func NewDefaultDirective() *Directive {
directive := Directive{}
directive.setEscapeToken(string(DefaultEscapeToken))
return &directive
}
func init() {
// Dispatch Table. see line_parsers.go for the parse functions.
// The command is parsed and mapped to the line parser. The line parser
// receives the arguments but not the command, and returns an AST after
// reformulating the arguments according to the rules in the parser
// functions. Errors are propagated up by Parse() and the resulting AST can
// be incorporated directly into the existing AST as a next.
dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
command.Add: parseMaybeJSONToList,
command.Arg: parseNameOrNameVal,
command.Cmd: parseMaybeJSON,
command.Copy: parseMaybeJSONToList,
command.Entrypoint: parseMaybeJSON,
command.Env: parseEnv,
command.Expose: parseStringsWhitespaceDelimited,
command.From: parseStringsWhitespaceDelimited,
command.Healthcheck: parseHealthConfig,
command.Label: parseLabel,
command.Maintainer: parseString,
command.Onbuild: parseSubCommand,
command.Run: parseMaybeJSON,
command.Shell: parseMaybeJSON,
command.StopSignal: parseString,
command.User: parseString,
command.Volume: parseMaybeJSONToList,
command.Workdir: parseString,
}
}
// newNodeFromLine splits the line into parts, and dispatches to a function
// based on the command and command arguments. A Node is created from the
// result of the dispatch.
func newNodeFromLine(line string, directive *Directive) (*Node, error) {
cmd, flags, args, err := splitCommand(line)
if err != nil {
return nil, err
}
fn := dispatch[cmd]
// Ignore invalid Dockerfile instructions
if fn == nil {
fn = parseIgnore
}
next, attrs, err := fn(args, directive)
if err != nil {
return nil, err
}
return &Node{
Value: cmd,
Original: line,
Flags: flags,
Next: next,
Attributes: attrs,
}, nil
}
// Result is the result of parsing a Dockerfile
type Result struct {
AST *Node
EscapeToken rune
// TODO @jhowardmsft - see https://github.com/moby/moby/issues/34617
// This next field will be removed in a future update for LCOW support.
OS string
Warnings []string
}
// PrintWarnings to the writer
func (r *Result) PrintWarnings(out io.Writer) {
if len(r.Warnings) == 0 {
return
}
fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
}
// Parse reads lines from a Reader, parses the lines into an AST and returns
// the AST and escape token
func Parse(rwc io.Reader) (*Result, error) {
d := NewDefaultDirective()
currentLine := 0
root := &Node{StartLine: -1}
scanner := bufio.NewScanner(rwc)
warnings := []string{}
var err error
for scanner.Scan() {
bytesRead := scanner.Bytes()
if currentLine == 0 {
// First line, strip the byte-order-marker if present
bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
}
bytesRead, err = processLine(d, bytesRead, true)
if err != nil {
return nil, err
}
currentLine++
startLine := currentLine
line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
if isEndOfLine && line == "" {
continue
}
var hasEmptyContinuationLine bool
for !isEndOfLine && scanner.Scan() {
bytesRead, err := processLine(d, scanner.Bytes(), false)
if err != nil {
return nil, err
}
currentLine++
if isComment(scanner.Bytes()) {
// original line was a comment (processLine strips comments)
continue
}
if isEmptyContinuationLine(bytesRead) {
hasEmptyContinuationLine = true
continue
}
continuationLine := string(bytesRead)
continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
line += continuationLine
}
if hasEmptyContinuationLine {
warning := "[WARNING]: Empty continuation line found in:\n " + line
warnings = append(warnings, warning)
}
child, err := newNodeFromLine(line, d)
if err != nil {
return nil, err
}
root.AddChild(child, startLine, currentLine)
}
if len(warnings) > 0 {
warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
}
return &Result{
AST: root,
Warnings: warnings,
EscapeToken: d.escapeToken,
OS: d.platformToken,
}, nil
}
func trimComments(src []byte) []byte {
return tokenComment.ReplaceAll(src, []byte{})
}
func trimWhitespace(src []byte) []byte {
return bytes.TrimLeftFunc(src, unicode.IsSpace)
}
func isComment(line []byte) bool {
return tokenComment.Match(trimWhitespace(line))
}
func isEmptyContinuationLine(line []byte) bool {
return len(trimWhitespace(line)) == 0
}
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
func trimContinuationCharacter(line string, d *Directive) (string, bool) {
if d.lineContinuationRegex.MatchString(line) {
line = d.lineContinuationRegex.ReplaceAllString(line, "")
return line, false
}
return line, true
}
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
// to preserve whitespace on continuation lines. Why is that done?
func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
if stripLeftWhitespace {
token = trimWhitespace(token)
}
return trimComments(token), d.possibleParserDirective(string(token))
}
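A short, hedged usage sketch of the Parse entry point above (not part of this diff): it reads a Dockerfile from any io.Reader, joins continuation lines, collects warnings, and returns one child node per instruction. The sample Dockerfile content is invented for illustration.

package main

import (
    "fmt"
    "os"
    "strings"

    "github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
    dockerfile := `FROM alpine
# comments are stripped by processLine
RUN apk add --no-cache curl \
    git
ENV APP_HOME=/srv/app
`
    res, err := parser.Parse(strings.NewReader(dockerfile))
    if err != nil {
        panic(err)
    }

    // Warnings (for example about empty continuation lines) are collected
    // during the scan and can be printed separately.
    res.PrintWarnings(os.Stderr)

    // Each child of the root is one instruction; StartLine records where it
    // began in the original Dockerfile.
    for _, child := range res.AST.Children {
        fmt.Printf("line %d: %s\n", child.StartLine, child.Dump())
    }
}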

@@ -1,118 +0,0 @@
package parser
import (
"strings"
"unicode"
)
// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
func splitCommand(line string) (string, []string, string, error) {
var args string
var flags []string
// Make sure we get the same results irrespective of leading/trailing spaces
cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
cmd := strings.ToLower(cmdline[0])
if len(cmdline) == 2 {
var err error
args, flags, err = extractBuilderFlags(cmdline[1])
if err != nil {
return "", nil, "", err
}
}
return cmd, flags, strings.TrimSpace(args), nil
}
func extractBuilderFlags(line string) (string, []string, error) {
// Parses the BuilderFlags and returns the remaining part of the line
const (
inSpaces = iota // looking for start of a word
inWord
inQuote
)
words := []string{}
phase := inSpaces
word := ""
quote := '\000'
blankOK := false
var ch rune
for pos := 0; pos <= len(line); pos++ {
if pos != len(line) {
ch = rune(line[pos])
}
if phase == inSpaces { // Looking for start of word
if pos == len(line) { // end of input
break
}
if unicode.IsSpace(ch) { // skip spaces
continue
}
// Only keep going if the next word starts with --
if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
return line[pos:], words, nil
}
phase = inWord // found something with "--", fall through
}
if (phase == inWord || phase == inQuote) && (pos == len(line)) {
if word != "--" && (blankOK || len(word) > 0) {
words = append(words, word)
}
break
}
if phase == inWord {
if unicode.IsSpace(ch) {
phase = inSpaces
if word == "--" {
return line[pos:], words, nil
}
if blankOK || len(word) > 0 {
words = append(words, word)
}
word = ""
blankOK = false
continue
}
if ch == '\'' || ch == '"' {
quote = ch
blankOK = true
phase = inQuote
continue
}
if ch == '\\' {
if pos+1 == len(line) {
continue // just skip \ at end
}
pos++
ch = rune(line[pos])
}
word += string(ch)
continue
}
if phase == inQuote {
if ch == quote {
phase = inWord
continue
}
if ch == '\\' {
if pos+1 == len(line) {
phase = inWord
continue // just skip \ at end
}
pos++
ch = rune(line[pos])
}
word += string(ch)
}
}
return "", words, nil
}
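splitCommand and extractBuilderFlags are unexported, so the easiest way to see their effect from outside the package is through the Flags field of a parsed node. A hedged sketch (not part of this diff); the expected values in comments are assumptions, not captured output.

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/builder/dockerfile/parser"
)

func main() {
    res, err := parser.Parse(strings.NewReader(`COPY --from=builder /src/app /usr/local/bin/app`))
    if err != nil {
        panic(err)
    }
    node := res.AST.Children[0]

    // extractBuilderFlags peels off the leading --flag words; the remainder
    // is handed to the COPY line parser as ordinary arguments.
    fmt.Println(node.Value) // copy
    fmt.Println(node.Flags) // [--from=builder]
    for n := node.Next; n != nil; n = n.Next {
        fmt.Println(n.Value) // /src/app, then /usr/local/bin/app
    }
}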