vendor: bump to buildah ca578b290144 and use new cache API

Bump to buildah ca578b290144 and use the new list-based `cacheTo` and `cacheFrom` API.

[NO NEW TESTS NEEDED]
[NO TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>
Aditya R
2022-12-20 17:12:55 +05:30
parent fb967aabc3
commit 987c8e3a78
34 changed files with 225 additions and 210 deletions
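
For context, a minimal sketch (not part of this commit) of how a caller drives the new list-based cache API after this bump. parse.RepoNamesToNamedReferences and the []reference.Named option fields are the ones introduced in the vendored buildah shown below; the repository names are placeholders.

// Sketch only: multiple repositories can now be supplied as cache sources.
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
	"github.com/containers/buildah/pkg/parse"
)

func main() {
	cacheFrom, err := parse.RepoNamesToNamedReferences([]string{
		"quay.io/example/cache-a", // placeholder repositories
		"quay.io/example/cache-b",
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	options := define.BuildOptions{
		CacheFrom: cacheFrom, // was a single reference.Named, now a []reference.Named
	}
	fmt.Printf("%d cache source repositories configured\n", len(options.CacheFrom))
}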


@ -527,16 +527,16 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
}
}
var cacheTo reference.Named
var cacheFrom reference.Named
var cacheTo []reference.Named
var cacheFrom []reference.Named
if c.Flag("cache-to").Changed {
cacheTo, err = parse.RepoNameToNamedReference(flags.CacheTo)
cacheTo, err = parse.RepoNamesToNamedReferences(flags.CacheTo)
if err != nil {
return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", flags.CacheTo, err)
}
}
if c.Flag("cache-from").Changed {
cacheFrom, err = parse.RepoNameToNamedReference(flags.CacheFrom)
cacheFrom, err = parse.RepoNamesToNamedReferences(flags.CacheFrom)
if err != nil {
return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", flags.CacheTo, err)
}

go.mod

@ -11,7 +11,7 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.5.3
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f
github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144
github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.23.1-0.20221216122512-3963f229df32

go.sum

@ -262,8 +262,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f h1:Nzbda2tG7/aimoKnDxysqFgS1Q/gSsbcn88lFPj9LwY=
github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f/go.mod h1:0HcSoS6BHXWzMKqtxY1L0gupebEX33oPC+X62lPi6+c=
github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144 h1:2RQIBdC4z6JeUysEBFmdyRjeQL+XHikWGxDoWiPDsAw=
github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144/go.mod h1:UtGNHlAwNF1WV/Z63R/sPgxItTog/YPi/1gSfZ8ZdpE=
github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002 h1:wvT0IrvGcZ0tEAvF1CYjaI6xjQjXr4vDnrlHRAYEo0Q=
github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002/go.mod h1:EhEJRALj8qJWhnnzk6nY6wqDkSjfGpU2DwcLb9UpVoM=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=


@ -400,47 +400,30 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
}
// Docker's newer clients populate the `cacheFrom` and `cacheTo` parameters
// by default as an empty array for all commands, but buildah's design of
// distributed cache expects this to be a repo, not an image, hence parse
// only the first populated repo and ignore an empty array.
// Read more here: https://github.com/containers/podman/issues/15928
// TODO: Remove this when buildah's API is extended.
compatIgnoreForcedCacheOptions := func(queryStr string) string {
query := queryStr
if strings.HasPrefix(query, "[") {
query = ""
var arr []string
parseErr := json.Unmarshal([]byte(query), &arr)
if parseErr != nil {
if len(arr) > 0 {
query = arr[0]
}
}
}
return query
}
var cacheFrom reference.Named
cacheFrom := []reference.Named{}
if _, found := r.URL.Query()["cachefrom"]; found {
cacheFromQuery := compatIgnoreForcedCacheOptions(query.CacheFrom)
if cacheFromQuery != "" {
cacheFrom, err = parse.RepoNameToNamedReference(cacheFromQuery)
if err != nil {
utils.BadRequest(w, "cacheFrom", cacheFromQuery, err)
return
}
var cacheFromSrcList []string
if err := json.Unmarshal([]byte(query.CacheFrom), &cacheFromSrcList); err != nil {
utils.BadRequest(w, "cacheFrom", query.CacheFrom, err)
return
}
cacheFrom, err = parse.RepoNamesToNamedReferences(cacheFromSrcList)
if err != nil {
utils.BadRequest(w, "cacheFrom", query.CacheFrom, err)
return
}
}
var cacheTo reference.Named
cacheTo := []reference.Named{}
if _, found := r.URL.Query()["cacheto"]; found {
cacheToQuery := compatIgnoreForcedCacheOptions(query.CacheTo)
if cacheToQuery != "" {
cacheTo, err = parse.RepoNameToNamedReference(cacheToQuery)
if err != nil {
utils.BadRequest(w, "cacheto", cacheToQuery, err)
return
}
var cacheToDestList []string
if err := json.Unmarshal([]byte(query.CacheTo), &cacheToDestList); err != nil {
utils.BadRequest(w, "cacheTo", query.CacheTo, err)
return
}
cacheTo, err = parse.RepoNamesToNamedReferences(cacheToDestList)
if err != nil {
utils.BadRequest(w, "cacheto", query.CacheTo, err)
return
}
}
var cacheTTL time.Duration
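
The hunk above switches the compat endpoint to accept cachefrom/cacheto as JSON-encoded string arrays, which is what newer Docker clients (and the Podman bindings further down in this commit) send. Below is a small sketch, not part of the diff, of building such a query value; the repository names are placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Encode the cache source repositories the same way the bindings do
	// (jsoniter there, encoding/json here for a self-contained example).
	cacheFrom, err := json.Marshal([]string{"quay.io/example/cache-a", "quay.io/example/cache-b"})
	if err != nil {
		panic(err)
	}
	params := url.Values{}
	params.Set("cachefrom", string(cacheFrom))
	// The handler unmarshals this value into a []string and resolves each
	// entry with parse.RepoNamesToNamedReferences.
	fmt.Println(params.Encode())
}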


@ -231,7 +231,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
params.Set("manifest", options.Manifest)
}
if options.CacheFrom != nil {
params.Set("cachefrom", options.CacheFrom.String())
cacheFrom := []string{}
for _, cacheSrc := range options.CacheFrom {
cacheFrom = append(cacheFrom, cacheSrc.String())
}
cacheFromJSON, err := jsoniter.MarshalToString(cacheFrom)
if err != nil {
return nil, err
}
params.Set("cachefrom", cacheFromJSON)
}
switch options.SkipUnusedStages {
@ -242,7 +250,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
}
if options.CacheTo != nil {
params.Set("cacheto", options.CacheTo.String())
cacheTo := []string{}
for _, cacheSrc := range options.CacheTo {
cacheTo = append(cacheTo, cacheSrc.String())
}
cacheToJSON, err := jsoniter.MarshalToString(cacheTo)
if err != nil {
return nil, err
}
params.Set("cacheto", cacheToJSON)
}
if int64(options.CacheTTL) != 0 {
params.Set("cachettl", options.CacheTTL.String())


@ -27,13 +27,13 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-36"
#PRIOR_FEDORA_NAME: "fedora-35"
FEDORA_NAME: "fedora-37"
PRIOR_FEDORA_NAME: "fedora-36"
UBUNTU_NAME: "ubuntu-2204"
IMAGE_SUFFIX: "c5124654741323776"
IMAGE_SUFFIX: "c4815821738868736"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
#PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
@ -72,9 +72,9 @@ meta_task:
env:
# Space-separated list of images used by this repository state
# TODO: Re-add ${PRIOR_FEDORA_CACHE_IMAGE_NAME} when placed back in use
IMGNAMES: |-
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
build-push-${IMAGE_SUFFIX}
BUILDID: "${CIRRUS_BUILD_ID}"
@ -215,10 +215,10 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
@ -228,10 +228,10 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
@ -272,11 +272,11 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
# PRIV_NAME: rootless
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
- env:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"


@ -143,22 +143,24 @@ is available, showing all PRs awaiting review and approval.
## Communications
For general questions or discussions, please use the
IRC group on `irc.freenode.net` called `buildah`
that has been setup.
IRC channel `#podman` on `irc.libera.chat`. If you are unfamiliar with IRC you can start a web client at https://web.libera.chat/#podman.
Alternatively, [\[matrix\]](https://matrix.org) can be used to access the same channel via federation at https://matrix.to/#/#podman:chat.fedoraproject.org.
### For discussions around issues/bugs and features:
#### Buildah Mailing List
You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and optionally name, then click on the "Subscribe" button.
#### GitHub
You can also use the github
You can also use GitHub
[issues](https://github.com/containers/buildah/issues)
and
[PRs](https://github.com/containers/buildah/pulls)
tracking system.
#### Buildah Mailing List
You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and optionally name, then click on the "Subscribe" button.
## Becoming a Maintainer
To become a maintainer you must first be nominated by an existing maintainer.
@ -172,3 +174,4 @@ inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.


@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -115,7 +114,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
if size < 0 {
// Create a temporary file and copy the content to it, so that
// we can figure out how much content there is.
f, err := ioutil.TempFile(mountpoint, "download")
f, err := os.CreateTemp(mountpoint, "download")
if err != nil {
return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
}


@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -402,7 +401,7 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
return nil, err
}
@ -444,7 +443,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
@ -481,7 +480,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
logrus.Debugf("%v, ignoring container %q", err, container.ID)


@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/signal"
@ -690,7 +689,7 @@ func runUsingChrootExecMain() {
os.Exit(1)
}
} else {
setgroups, _ := ioutil.ReadFile("/proc/self/setgroups")
setgroups, _ := os.ReadFile("/proc/self/setgroups")
if strings.Trim(string(setgroups), "\n") != "deny" {
logrus.Debugf("clearing supplemental groups")
if err = syscall.Setgroups([]int{}); err != nil {


@ -5,7 +5,7 @@ package chroot
import (
"fmt"
"io/ioutil"
"os"
"github.com/containers/common/pkg/seccomp"
specs "github.com/opencontainers/runtime-spec/specs-go"
@ -187,7 +187,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
seccompProfile, err := os.ReadFile(seccompProfilePath)
if err != nil {
return fmt.Errorf("opening seccomp profile failed: %w", err)
}


@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
@ -392,7 +391,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
dest = dest2
}
if options.IIDFile != "" {
if err = ioutil.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
return imgID, nil, "", err
}
}


@ -8,7 +8,6 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"net"
"os"
"os/user"
@ -573,7 +572,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
bulkReader = bytes.NewReader([]byte{})
}
if bulkWriter == nil {
bulkWriter = ioutil.Discard
bulkWriter = io.Discard
}
cmd := reexec.Command(copierCommand)
stdinRead, stdinWrite, err := os.Pipe()


@ -141,10 +141,10 @@ type BuildOptions struct {
TransientMounts []string
// CacheFrom specifies any remote repository which can be treated as
// potential cache source.
CacheFrom reference.Named
CacheFrom []reference.Named
// CacheTo specifies any remote repository which can be treated as
// potential cache destination.
CacheTo reference.Named
CacheTo []reference.Named
// CacheTTL specifies duration, if specified using `--cache-ttl` then
// cache intermediate images under this duration will be considered as
// valid cache sources and images outside this duration will be ignored.


@ -5,7 +5,7 @@ import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
urlpkg "net/url"
"os"
@ -121,7 +121,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
url != "-" {
return "", "", nil
}
name, err = ioutil.TempDir(dir, prefix)
name, err = os.MkdirTemp(dir, prefix)
if err != nil {
return "", "", fmt.Errorf("creating temporary directory for %q: %w", url, err)
}
@ -255,7 +255,7 @@ func downloadToDirectory(url, dir string) error {
return err
}
defer resp1.Body.Close()
body, err := ioutil.ReadAll(resp1.Body)
body, err := io.ReadAll(resp1.Body)
if err != nil {
return err
}
@ -271,7 +271,7 @@ func downloadToDirectory(url, dir string) error {
func stdinToDirectory(dir string) error {
logrus.Debugf("extracting stdin to %q", dir)
r := bufio.NewReader(os.Stdin)
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return fmt.Errorf("failed to read from stdin: %w", err)
}


@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -309,7 +308,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
logrus.Debugf("layer list: %q", layers)
// Make a temporary directory to hold blobs.
path, err := ioutil.TempDir(os.TempDir(), define.Package)
path, err := os.MkdirTemp(os.TempDir(), define.Package)
if err != nil {
return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
}


@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
@ -153,7 +152,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, err
}
data = ioutil.NopCloser(pData)
data = io.NopCloser(pData)
}
dockerfiles = append(dockerfiles, data)


@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
@ -58,8 +57,8 @@ var builtinAllowedBuildArgs = map[string]bool{
// interface. It coordinates the entire build by using one or more
// StageExecutors to handle each stage of the build.
type Executor struct {
cacheFrom reference.Named
cacheTo reference.Named
cacheFrom []reference.Named
cacheTo []reference.Named
cacheTTL time.Duration
containerSuffix string
logger *logrus.Logger
@ -200,7 +199,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
writer := options.ReportWriter
if options.Quiet {
writer = ioutil.Discard
writer = io.Discard
}
var rusageLogFile io.Writer
@ -589,7 +588,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
stdout := b.out
if b.quiet {
b.out = ioutil.Discard
b.out = io.Discard
}
cleanup := func() error {
@ -954,7 +953,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
logrus.Debugf("printing final image id %q", imageID)
if b.iidfile != "" {
if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
}
} else {


@ -1729,42 +1729,48 @@ func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.N
// cacheImageReferences is an internal function which generates ImageReferences from Named repo sources
// and a tag.
func cacheImageReference(repo reference.Named, cachekey string) (types.ImageReference, error) {
tagged, err := reference.WithTag(repo, cachekey)
if err != nil {
return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err)
func cacheImageReferences(repos []reference.Named, cachekey string) ([]types.ImageReference, error) {
var result []types.ImageReference
for _, repo := range repos {
tagged, err := reference.WithTag(repo, cachekey)
if err != nil {
return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err)
}
dest, err := imagedocker.NewReference(tagged)
if err != nil {
return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err)
}
result = append(result, dest)
}
dest, err := imagedocker.NewReference(tagged)
if err != nil {
return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err)
}
return dest, nil
return result, nil
}
// pushCache takes the image id of intermediate image and attempts
// to perform push at the remote repository with cacheKey as the tag.
// Returns error if fails otherwise returns nil.
func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) error {
dest, err := cacheImageReference(s.executor.cacheTo, cacheKey)
destList, err := cacheImageReferences(s.executor.cacheTo, cacheKey)
if err != nil {
return err
}
logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src)
options := buildah.PushOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
for _, dest := range destList {
logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src)
options := buildah.PushOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
}
ref, digest, err := buildah.Push(ctx, src, dest, options)
if err != nil {
return fmt.Errorf("failed pushing cache to %q: %w", dest, err)
}
logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest)
}
ref, digest, err := buildah.Push(ctx, src, dest, options)
if err != nil {
return fmt.Errorf("failed pushing cache to %q: %w", dest, err)
}
logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest)
return nil
}
@ -1775,29 +1781,33 @@ func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) err
// image was pulled function returns image id otherwise returns empty
// string "" or error if any error was encontered while pulling the cache.
func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (string, error) {
src, err := cacheImageReference(s.executor.cacheFrom, cacheKey)
srcList, err := cacheImageReferences(s.executor.cacheFrom, cacheKey)
if err != nil {
return "", err
}
logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference())
options := buildah.PullOptions{
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
BlobDirectory: s.executor.blobDirectory,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
AllTags: false,
ReportWriter: nil,
PullPolicy: define.PullIfNewer,
for _, src := range srcList {
logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference())
options := buildah.PullOptions{
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
BlobDirectory: s.executor.blobDirectory,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
AllTags: false,
ReportWriter: nil,
PullPolicy: define.PullIfNewer,
}
id, err := buildah.Pull(ctx, src.DockerReference().String(), options)
if err != nil {
logrus.Debugf("failed pulling cache from source %s: %v", src, err)
continue // failed pulling this one try next
//return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
}
logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
return id, nil
}
id, err := buildah.Pull(ctx, src.DockerReference().String(), options)
if err != nil {
logrus.Debugf("failed pulling cache from source %s: %v", src, err)
return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
}
logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
return id, nil
return "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
}
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
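
A short sketch (not part of this commit) of what the new cacheImageReferences loop above produces: one tagged reference per configured cache repository, all sharing the same content-derived cache key as the tag. pullCache then tries these sources in order and keeps the first successful pull, while pushCache pushes the intermediate image to every destination. The repository names, the key value, and the reference import path are assumptions for illustration.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	cacheKey := "e3b0c44298fc1c149afbf4c8996fb924" // placeholder content-derived key
	for _, repo := range []string{"quay.io/example/cache-a", "quay.io/example/cache-b"} {
		named, err := reference.ParseNormalizedNamed(repo)
		if err != nil {
			fmt.Println("bad repo:", err)
			continue
		}
		tagged, err := reference.WithTag(named, cacheKey)
		if err != nil {
			fmt.Println("bad tag:", err)
			continue
		}
		// e.g. quay.io/example/cache-a:e3b0c44298fc1c149afbf4c8996fb924
		fmt.Println(tagged.String())
	}
}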


@ -188,8 +188,8 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
// GetCacheMount parses a single cache mount entry from the --mount flag.
//
// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, lockfile.Locker, error) {
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, *lockfile.LockFile, error) {
var err error
var mode uint64
var buildahLockFilesDir string
@ -364,7 +364,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
}
}
var targetLock lockfile.Locker // = nil
var targetLock *lockfile.LockFile // = nil
succeeded := false
defer func() {
if !succeeded && targetLock != nil {
@ -374,7 +374,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
switch sharing {
case "locked":
// lock parent cache
lockfile, err := lockfile.GetLockfile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
if err != nil {
return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
}
@ -497,7 +497,7 @@ func Volume(volume string) (specs.Mount, error) {
}
// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
func UnlockLockArray(locks []lockfile.Locker) {
func UnlockLockArray(locks []*lockfile.LockFile) {
for _, lock := range locks {
lock.Unlock()
}
@ -505,8 +505,8 @@ func UnlockLockArray(locks []lockfile.Locker) {
// GetVolumes gets the volumes from --volume and --mount
//
// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []lockfile.Locker, error) {
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir)
if err != nil {
return nil, mountedImages, nil, err
@ -541,13 +541,13 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
//
// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []lockfile.Locker, error) {
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
// If `type` is not set default to "bind"
mountType := define.TypeBind
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
targetLocks := make([]lockfile.Locker, 0)
targetLocks := make([]*lockfile.LockFile, 0)
succeeded := false
defer func() {
if !succeeded {


@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -301,18 +300,18 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
iopts.Quiet = true
}
}
var cacheTo reference.Named
var cacheFrom reference.Named
var cacheTo []reference.Named
var cacheFrom []reference.Named
cacheTo = nil
cacheFrom = nil
if c.Flag("cache-to").Changed {
cacheTo, err = parse.RepoNameToNamedReference(iopts.CacheTo)
cacheTo, err = parse.RepoNamesToNamedReferences(iopts.CacheTo)
if err != nil {
return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err)
}
}
if c.Flag("cache-from").Changed {
cacheFrom, err = parse.RepoNameToNamedReference(iopts.CacheFrom)
cacheFrom, err = parse.RepoNamesToNamedReferences(iopts.CacheFrom)
if err != nil {
return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheTo, err)
}
@ -423,7 +422,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
UnsetEnvs: iopts.UnsetEnvs,
}
if iopts.Quiet {
options.ReportWriter = ioutil.Discard
options.ReportWriter = io.Discard
}
return options, containerfiles, removeAll, nil
}


@ -53,8 +53,8 @@ type BudResults struct {
Authfile string
BuildArg []string
BuildContext []string
CacheFrom string
CacheTo string
CacheFrom []string
CacheTo []string
CacheTTL string
CertDir string
Compress bool
@ -202,8 +202,8 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "remote repository to utilise as potential cache source.")
fs.StringVar(&flags.CacheTo, "cache-to", "", "remote repository to utilise as potential cache destination.")
fs.StringArrayVar(&flags.CacheFrom, "cache-from", []string{}, "remote repository list to utilise as potential cache source.")
fs.StringArrayVar(&flags.CacheTo, "cache-to", []string{}, "remote repository list to utilise as potential cache destination.")
fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")


@ -2,7 +2,6 @@ package overlay
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -60,7 +59,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
}
contentDir, err := ioutil.TempDir(contentDir, "")
contentDir, err := os.MkdirTemp(contentDir, "")
if err != nil {
return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
}
@ -291,7 +290,7 @@ func CleanupMount(contentDir string) (Err error) {
func CleanupContent(containerDir string) (Err error) {
contentDir := filepath.Join(containerDir, "overlay")
files, err := ioutil.ReadDir(contentDir)
files, err := os.ReadDir(contentDir)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil


@ -16,6 +16,7 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
securejoin "github.com/cyphar/filepath-securejoin"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/common/pkg/config"
@ -50,16 +51,20 @@ const (
BuildahCacheDir = "buildah-cache"
)
// RepoNameToNamedReference parse the raw string to Named reference
func RepoNameToNamedReference(dest string) (reference.Named, error) {
named, err := reference.ParseNormalizedNamed(dest)
if err != nil {
return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err)
// RepoNamesToNamedReferences parses the raw strings to Named references
func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
var result []reference.Named
for _, dest := range destList {
named, err := reference.ParseNormalizedNamed(dest)
if err != nil {
return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err)
}
if !reference.IsNameOnly(named) {
return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named)
}
result = append(result, named)
}
if !reference.IsNameOnly(named) {
return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named)
}
return named, nil
return result, nil
}
// CommonBuildOptions parses the build options from the bud cli
@ -1103,10 +1108,16 @@ func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]st
return excludes, containerfileIgnore, err
}
}
path = filepath.Join(contextDir, ".containerignore")
path, symlinkErr := securejoin.SecureJoin(contextDir, ".containerignore")
if symlinkErr != nil {
return nil, "", symlinkErr
}
excludes, err := imagebuilder.ParseIgnore(path)
if errors.Is(err, os.ErrNotExist) {
path = filepath.Join(contextDir, ".dockerignore")
path, symlinkErr = securejoin.SecureJoin(contextDir, ".dockerignore")
if symlinkErr != nil {
return nil, "", symlinkErr
}
excludes, err = imagebuilder.ParseIgnore(path)
}
if errors.Is(err, os.ErrNotExist) {


@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
@ -80,7 +79,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) {
if err != nil {
return "", err
}
serveDir, err := ioutil.TempDir("", ".buildah-ssh-sock")
serveDir, err := os.MkdirTemp("", ".buildah-ssh-sock")
if err != nil {
return "", err
}
@ -223,7 +222,7 @@ func NewSource(paths []string) (*Source, error) {
if err != nil {
return nil, err
}
dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
if err != nil {
return nil, err
}


@ -3,12 +3,12 @@ package util
import (
"bytes"
"errors"
"io/ioutil"
"time"
"os"
)
func ReadUptime() (time.Duration, error) {
buf, err := ioutil.ReadFile("/proc/uptime")
buf, err := os.ReadFile("/proc/uptime")
if err != nil {
return 0, err
}


@ -2,7 +2,6 @@ package util
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -21,12 +20,12 @@ func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) {
if !strings.HasPrefix(file, "/dev/fd") {
return file, false
}
b, err := ioutil.ReadFile(file)
b, err := os.ReadFile(file)
if err != nil {
// if anything goes wrong return original path
return file, false
}
tmpfile, err := ioutil.TempFile(os.TempDir(), "buildah-temp-file")
tmpfile, err := os.CreateTemp(os.TempDir(), "buildah-temp-file")
if err != nil {
return file, false
}


@ -178,7 +178,7 @@ type runMountArtifacts struct {
// SSHAuthSock is the path to the ssh auth sock inside the container
SSHAuthSock string
// TargetLocks to be unlocked if there are any.
TargetLocks []lockfile.Locker
TargetLocks []*lockfile.LockFile
}
// RunMountInfo are the available run mounts for this run


@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
@ -556,7 +555,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
}()
// Make sure we read the container's exit status when it exits.
pidValue, err := ioutil.ReadFile(pidFile)
pidValue, err := os.ReadFile(pidFile)
if err != nil {
return 1, err
}
@ -1185,7 +1184,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
logrus.Errorf("did not get container create message from subprocess: %v", err)
} else {
pidFile := filepath.Join(bundlePath, "pid")
pidValue, err := ioutil.ReadFile(pidFile)
pidValue, err := os.ReadFile(pidFile)
if err != nil {
return err
}
@ -1199,7 +1198,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
defer teardown()
}
if err != nil {
return err
return fmt.Errorf("setup network: %w", err)
}
// only add hosts if we manage the hosts file
@ -1464,7 +1463,7 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
agents := make([]*sshagent.AgentServer, 0, len(mounts))
sshCount := 0
defaultSSHSock := ""
targetLocks := []lockfile.Locker{}
targetLocks := []*lockfile.LockFile{}
succeeded := false
defer func() {
if !succeeded {
@ -1655,7 +1654,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
switch secr.SourceType {
case "env":
data = []byte(os.Getenv(secr.Source))
tmpFile, err := ioutil.TempFile(define.TempDir, "buildah*")
tmpFile, err := os.CreateTemp(define.TempDir, "buildah*")
if err != nil {
return nil, "", err
}
@ -1666,7 +1665,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
if err != nil {
return nil, "", err
}
data, err = ioutil.ReadFile(secr.Source)
data, err = os.ReadFile(secr.Source)
if err != nil {
return nil, "", err
}
@ -1680,7 +1679,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
return nil, "", err
}
if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
if err := os.WriteFile(ctrFileOnHost, data, 0644); err != nil {
return nil, "", err
}


@ -6,7 +6,6 @@ package buildah
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -72,7 +71,7 @@ func setChildProcess() error {
}
func (b *Builder) Run(command []string, options RunOptions) error {
p, err := ioutil.TempDir("", Package)
p, err := os.MkdirTemp("", Package)
if err != nil {
return err
}
@ -305,8 +304,8 @@ func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Moun
return spec.Mounts, nil
}
// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, lockfile.Locker, error) {
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, *lockfile.LockFile, error) {
return nil, nil, errors.New("cache mounts not supported on freebsd")
}


@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -70,7 +69,7 @@ func setChildProcess() error {
// Run runs the specified command in the container's root filesystem.
func (b *Builder) Run(command []string, options RunOptions) error {
p, err := ioutil.TempDir("", define.Package)
p, err := os.MkdirTemp("", define.Package)
if err != nil {
return err
}
@ -480,7 +479,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
defer rootlessSlirpSyncR.Close()
// Be sure there are no fds inherited to slirp4netns except the sync pipe
files, err := ioutil.ReadDir("/proc/self/fd")
files, err := os.ReadDir("/proc/self/fd")
if err != nil {
return nil, fmt.Errorf("cannot list open fds: %w", err)
}
@ -1199,8 +1198,8 @@ func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool {
return false
}
// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, lockfile.Locker, error) {
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, *lockfile.LockFile, error) {
var optionMounts []specs.Mount
mount, targetLock, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
if err != nil {


@ -5,7 +5,7 @@ package buildah
import (
"fmt"
"io/ioutil"
"os"
"github.com/containers/common/pkg/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"
@ -22,7 +22,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
seccompProfile, err := os.ReadFile(seccompProfilePath)
if err != nil {
return fmt.Errorf("opening seccomp profile failed: %w", err)
}


@ -441,7 +441,14 @@ func (m byDestination) Len() int {
}
func (m byDestination) Less(i, j int) bool {
return m.parts(i) < m.parts(j)
iparts, jparts := m.parts(i), m.parts(j)
switch {
case iparts < jparts:
return true
case iparts > jparts:
return false
}
return filepath.Clean(m[i].Destination) < filepath.Clean(m[j].Destination)
}
func (m byDestination) Swap(i, j int) {
@ -453,7 +460,7 @@ func (m byDestination) parts(i int) int {
}
func SortMounts(m []specs.Mount) []specs.Mount {
sort.Sort(byDestination(m))
sort.Stable(byDestination(m))
return m
}

vendor/modules.txt

@ -95,7 +95,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit; go 1.17
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f
# github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144
## explicit; go 1.17
github.com/containers/buildah
github.com/containers/buildah/bind