vendor: bump to buildah ca578b290144 and use new cache API

Bump to buildah ca578b290144 and use the new `cacheTo` and `cacheFrom` API, which now accepts a list of cache repositories instead of a single one.

[NO NEW TESTS NEEDED]
[NO TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>
Author: Aditya R
Date: 2022-12-20 17:12:55 +05:30
parent fb967aabc3
commit 987c8e3a78
34 changed files with 225 additions and 210 deletions
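For context, a minimal sketch of how a caller sees the bumped API: `CacheFrom`/`CacheTo` on buildah's `define.BuildOptions` are now slices of named references, populated through the `parse.RepoNamesToNamedReferences` helper shown in the diffs below. The repository names here are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
	"github.com/containers/buildah/pkg/parse"
)

func main() {
	// Parse a list of cache repositories; each entry must name a repository
	// without a tag or digest.
	repos, err := parse.RepoNamesToNamedReferences([]string{
		"quay.io/example/cache-a", // hypothetical repositories
		"registry.example.com/cache-b",
	})
	if err != nil {
		panic(err)
	}

	// The list-valued fields replace the previous single reference.Named fields.
	options := define.BuildOptions{
		CacheFrom: repos,
		CacheTo:   repos,
	}
	fmt.Println(len(options.CacheFrom), len(options.CacheTo))
}
```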


@@ -527,16 +527,16 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
 }
 }
 }
-var cacheTo reference.Named
+var cacheTo []reference.Named
-var cacheFrom reference.Named
+var cacheFrom []reference.Named
 if c.Flag("cache-to").Changed {
-cacheTo, err = parse.RepoNameToNamedReference(flags.CacheTo)
+cacheTo, err = parse.RepoNamesToNamedReferences(flags.CacheTo)
 if err != nil {
 return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", flags.CacheTo, err)
 }
 }
 if c.Flag("cache-from").Changed {
-cacheFrom, err = parse.RepoNameToNamedReference(flags.CacheFrom)
+cacheFrom, err = parse.RepoNamesToNamedReferences(flags.CacheFrom)
 if err != nil {
 return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", flags.CacheTo, err)
 }

go.mod

@@ -11,7 +11,7 @@ require (
 github.com/container-orchestrated-devices/container-device-interface v0.5.3
 github.com/containernetworking/cni v1.1.2
 github.com/containernetworking/plugins v1.1.1
-github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f
+github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144
 github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002
 github.com/containers/conmon v2.0.20+incompatible
 github.com/containers/image/v5 v5.23.1-0.20221216122512-3963f229df32

go.sum

@@ -262,8 +262,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f h1:Nzbda2tG7/aimoKnDxysqFgS1Q/gSsbcn88lFPj9LwY=
-github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f/go.mod h1:0HcSoS6BHXWzMKqtxY1L0gupebEX33oPC+X62lPi6+c=
+github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144 h1:2RQIBdC4z6JeUysEBFmdyRjeQL+XHikWGxDoWiPDsAw=
+github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144/go.mod h1:UtGNHlAwNF1WV/Z63R/sPgxItTog/YPi/1gSfZ8ZdpE=
 github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002 h1:wvT0IrvGcZ0tEAvF1CYjaI6xjQjXr4vDnrlHRAYEo0Q=
 github.com/containers/common v0.50.2-0.20221216120044-ef7e0d6f3002/go.mod h1:EhEJRALj8qJWhnnzk6nY6wqDkSjfGpU2DwcLb9UpVoM=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=


@@ -400,47 +400,30 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
 }
 }
-// Docker's newer clients popuates `cacheFrom` and `cacheTo` parameter
-// by default as empty array for all commands but buildah's design of
-// distributed cache expects this to be a repo not image hence parse
-// only the first populated repo and ignore if empty array.
-// Read more here: https://github.com/containers/podman/issues/15928
-// TODO: Remove this when buildah's API is extended.
-compatIgnoreForcedCacheOptions := func(queryStr string) string {
-query := queryStr
-if strings.HasPrefix(query, "[") {
-query = ""
-var arr []string
-parseErr := json.Unmarshal([]byte(query), &arr)
-if parseErr != nil {
-if len(arr) > 0 {
-query = arr[0]
-}
-}
-}
-return query
-}
-var cacheFrom reference.Named
+cacheFrom := []reference.Named{}
 if _, found := r.URL.Query()["cachefrom"]; found {
-cacheFromQuery := compatIgnoreForcedCacheOptions(query.CacheFrom)
-if cacheFromQuery != "" {
-cacheFrom, err = parse.RepoNameToNamedReference(cacheFromQuery)
-if err != nil {
-utils.BadRequest(w, "cacheFrom", cacheFromQuery, err)
-return
-}
-}
+var cacheFromSrcList []string
+if err := json.Unmarshal([]byte(query.CacheFrom), &cacheFromSrcList); err != nil {
+utils.BadRequest(w, "cacheFrom", query.CacheFrom, err)
+return
+}
+cacheFrom, err = parse.RepoNamesToNamedReferences(cacheFromSrcList)
+if err != nil {
+utils.BadRequest(w, "cacheFrom", query.CacheFrom, err)
+return
+}
 }
-var cacheTo reference.Named
+cacheTo := []reference.Named{}
 if _, found := r.URL.Query()["cacheto"]; found {
-cacheToQuery := compatIgnoreForcedCacheOptions(query.CacheTo)
-if cacheToQuery != "" {
-cacheTo, err = parse.RepoNameToNamedReference(cacheToQuery)
-if err != nil {
-utils.BadRequest(w, "cacheto", cacheToQuery, err)
-return
-}
-}
+var cacheToDestList []string
+if err := json.Unmarshal([]byte(query.CacheTo), &cacheToDestList); err != nil {
+utils.BadRequest(w, "cacheTo", query.CacheTo, err)
+return
+}
+cacheTo, err = parse.RepoNamesToNamedReferences(cacheToDestList)
+if err != nil {
+utils.BadRequest(w, "cacheto", query.CacheTo, err)
+return
+}
 }
 var cacheTTL time.Duration
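As the hunk above shows, the compat endpoint now `json.Unmarshal`s the `cachefrom`/`cacheto` query values into `[]string`. A minimal client-side sketch of that encoding; the endpoint path and repository name are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// The server unmarshals the query value into a []string, so the client
	// sends each cache option as a JSON-encoded string array.
	cacheFrom, _ := json.Marshal([]string{"quay.io/example/cache"}) // hypothetical repo

	q := url.Values{}
	q.Set("cachefrom", string(cacheFrom))

	// Prints something like /build?cachefrom=%5B%22quay.io%2Fexample%2Fcache%22%5D
	fmt.Println("/build?" + q.Encode())
}
```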


@@ -231,7 +231,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
 params.Set("manifest", options.Manifest)
 }
 if options.CacheFrom != nil {
-params.Set("cachefrom", options.CacheFrom.String())
+cacheFrom := []string{}
+for _, cacheSrc := range options.CacheFrom {
+cacheFrom = append(cacheFrom, cacheSrc.String())
+}
+cacheFromJSON, err := jsoniter.MarshalToString(cacheFrom)
+if err != nil {
+return nil, err
+}
+params.Set("cachefrom", cacheFromJSON)
 }
 switch options.SkipUnusedStages {
@@ -242,7 +250,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
 }
 if options.CacheTo != nil {
-params.Set("cacheto", options.CacheTo.String())
+cacheTo := []string{}
+for _, cacheSrc := range options.CacheTo {
+cacheTo = append(cacheTo, cacheSrc.String())
+}
+cacheToJSON, err := jsoniter.MarshalToString(cacheTo)
+if err != nil {
+return nil, err
+}
+params.Set("cacheto", cacheToJSON)
 }
 if int64(options.CacheTTL) != 0 {
 params.Set("cachettl", options.CacheTTL.String())


@@ -27,13 +27,13 @@ env:
 ####
 # GCE project where images live
 IMAGE_PROJECT: "libpod-218412"
-FEDORA_NAME: "fedora-36"
+FEDORA_NAME: "fedora-37"
-#PRIOR_FEDORA_NAME: "fedora-35"
+PRIOR_FEDORA_NAME: "fedora-36"
 UBUNTU_NAME: "ubuntu-2204"
-IMAGE_SUFFIX: "c5124654741323776"
+IMAGE_SUFFIX: "c4815821738868736"
 FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
-#PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
+PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
 UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
 IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
@@ -72,9 +72,9 @@ meta_task:
 env:
 # Space-separated list of images used by this repository state
-# TODO: Re-add ${PRIOR_FEDORA_CACHE_IMAGE_NAME} when place back in use
 IMGNAMES: |-
 ${FEDORA_CACHE_IMAGE_NAME}
+${PRIOR_FEDORA_CACHE_IMAGE_NAME}
 ${UBUNTU_CACHE_IMAGE_NAME}
 build-push-${IMAGE_SUFFIX}
 BUILDID: "${CIRRUS_BUILD_ID}"
@@ -215,10 +215,10 @@ integration_task:
 DISTRO_NV: "${FEDORA_NAME}"
 IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
 STORAGE_DRIVER: 'vfs'
-# - env:
-# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-# STORAGE_DRIVER: 'vfs'
+- env:
+DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+STORAGE_DRIVER: 'vfs'
 - env:
 DISTRO_NV: "${UBUNTU_NAME}"
 IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
@@ -228,10 +228,10 @@ integration_task:
 DISTRO_NV: "${FEDORA_NAME}"
 IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
 STORAGE_DRIVER: 'overlay'
-# - env:
-# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-# STORAGE_DRIVER: 'overlay'
+- env:
+DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+STORAGE_DRIVER: 'overlay'
 - env:
 DISTRO_NV: "${UBUNTU_NAME}"
 IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
@@ -272,11 +272,11 @@ integration_rootless_task:
 IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
 STORAGE_DRIVER: 'overlay'
 PRIV_NAME: rootless
-# - env:
-# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-# STORAGE_DRIVER: 'overlay'
-# PRIV_NAME: rootless
+- env:
+DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+STORAGE_DRIVER: 'overlay'
+PRIV_NAME: rootless
 - env:
 DISTRO_NV: "${UBUNTU_NAME}"
 IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"


@@ -143,22 +143,24 @@ is available, showing all PRs awaiting review and approval.
 ## Communications
 For general questions or discussions, please use the
-IRC group on `irc.freenode.net` called `buildah`
-that has been setup.
+IRC channel `#podman` on `irc.libera.chat`. If you are unfamiliar with IRC you can start a web client at https://web.libera.chat/#podman.
+Alternatively, [\[matrix\]](https://matrix.org) can be used to access the same channel via federation at https://matrix.to/#/#podman:chat.fedoraproject.org.
 ### For discussions around issues/bugs and features:
-#### Buildah Mailing List
-You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and optionally name, then click on the "Subscribe" button.
 #### GitHub
-You can also use the github
+You can also use GitHub
 [issues](https://github.com/containers/buildah/issues)
 and
 [PRs](https://github.com/containers/buildah/pulls)
 tracking system.
+#### Buildah Mailing List
+You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and optionally name, then click on the "Subscribe" button.
 ## Becoming a Maintainer
 To become a maintainer you must first be nominated by an existing maintainer.
@@ -172,3 +174,4 @@ inactive for a long period of time or are viewed as disruptive to the community.
 The current list of maintainers can be found in the
 [MAINTAINERS](MAINTAINERS) file.


@@ -5,7 +5,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "net/http"
 "net/url"
 "os"
@@ -115,7 +114,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
 if size < 0 {
 // Create a temporary file and copy the content to it, so that
 // we can figure out how much content there is.
-f, err := ioutil.TempFile(mountpoint, "download")
+f, err := os.CreateTemp(mountpoint, "download")
 if err != nil {
 return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
 }
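The bulk of the remaining vendored changes are mechanical `io/ioutil` deprecation cleanups (ioutil has been deprecated since Go 1.16). A small standalone sketch of the one-to-one replacements used throughout, using only the standard library:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Replacement map applied across these hunks:
//
//	ioutil.ReadFile  -> os.ReadFile
//	ioutil.WriteFile -> os.WriteFile
//	ioutil.TempFile  -> os.CreateTemp
//	ioutil.TempDir   -> os.MkdirTemp
//	ioutil.ReadDir   -> os.ReadDir (returns []fs.DirEntry instead of []fs.FileInfo)
//	ioutil.ReadAll   -> io.ReadAll
//	ioutil.Discard   -> io.Discard
//	ioutil.NopCloser -> io.NopCloser
func main() {
	dir, err := os.MkdirTemp("", "example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	f, err := os.CreateTemp(dir, "download")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := os.WriteFile(f.Name(), []byte("hello"), 0644); err != nil {
		panic(err)
	}
	data, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(strings.NewReader(string(data)))
	fmt.Println(string(rest))
}
```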


@@ -6,7 +6,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "path/filepath"
 "sort"
@@ -402,7 +401,7 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
 if err != nil {
 return nil, err
 }
-buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
 if err != nil {
 return nil, err
 }
@@ -444,7 +443,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
 if err != nil {
 return nil, err
 }
-buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
 if err != nil {
 if errors.Is(err, os.ErrNotExist) {
 logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
@@ -481,7 +480,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
 if err != nil {
 return nil, err
 }
-buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
 if err != nil {
 if errors.Is(err, os.ErrNotExist) {
 logrus.Debugf("%v, ignoring container %q", err, container.ID)


@@ -8,7 +8,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "os/exec"
 "os/signal"
@@ -690,7 +689,7 @@ func runUsingChrootExecMain() {
 os.Exit(1)
 }
 } else {
-setgroups, _ := ioutil.ReadFile("/proc/self/setgroups")
+setgroups, _ := os.ReadFile("/proc/self/setgroups")
 if strings.Trim(string(setgroups), "\n") != "deny" {
 logrus.Debugf("clearing supplemental groups")
 if err = syscall.Setgroups([]int{}); err != nil {


@@ -5,7 +5,7 @@ package chroot
 import (
 "fmt"
-"io/ioutil"
+"os"
 "github.com/containers/common/pkg/seccomp"
 specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -187,7 +187,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
 }
 spec.Linux.Seccomp = seccompConfig
 default:
-seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
+seccompProfile, err := os.ReadFile(seccompProfilePath)
 if err != nil {
 return fmt.Errorf("opening seccomp profile failed: %w", err)
 }


@@ -6,7 +6,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "strings"
 "time"
@@ -392,7 +391,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
 dest = dest2
 }
 if options.IIDFile != "" {
-if err = ioutil.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
+if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
 return imgID, nil, "", err
 }
 }


@@ -8,7 +8,6 @@ import (
 "fmt"
 "io"
 "io/fs"
-"io/ioutil"
 "net"
 "os"
 "os/user"
@@ -573,7 +572,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
 bulkReader = bytes.NewReader([]byte{})
 }
 if bulkWriter == nil {
-bulkWriter = ioutil.Discard
+bulkWriter = io.Discard
 }
 cmd := reexec.Command(copierCommand)
 stdinRead, stdinWrite, err := os.Pipe()


@@ -141,10 +141,10 @@ type BuildOptions struct {
 TransientMounts []string
 // CacheFrom specifies any remote repository which can be treated as
 // potential cache source.
-CacheFrom reference.Named
+CacheFrom []reference.Named
 // CacheTo specifies any remote repository which can be treated as
 // potential cache destination.
-CacheTo reference.Named
+CacheTo []reference.Named
 // CacheTTL specifies duration, if specified using `--cache-ttl` then
 // cache intermediate images under this duration will be considered as
 // valid cache sources and images outside this duration will be ignored.


@@ -5,7 +5,7 @@ import (
 "bytes"
 "errors"
 "fmt"
-"io/ioutil"
+"io"
 "net/http"
 urlpkg "net/url"
 "os"
@@ -121,7 +121,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
 url != "-" {
 return "", "", nil
 }
-name, err = ioutil.TempDir(dir, prefix)
+name, err = os.MkdirTemp(dir, prefix)
 if err != nil {
 return "", "", fmt.Errorf("creating temporary directory for %q: %w", url, err)
 }
@@ -255,7 +255,7 @@ func downloadToDirectory(url, dir string) error {
 return err
 }
 defer resp1.Body.Close()
-body, err := ioutil.ReadAll(resp1.Body)
+body, err := io.ReadAll(resp1.Body)
 if err != nil {
 return err
 }
@@ -271,7 +271,7 @@ func downloadToDirectory(url, dir string) error {
 func stdinToDirectory(dir string) error {
 logrus.Debugf("extracting stdin to %q", dir)
 r := bufio.NewReader(os.Stdin)
-b, err := ioutil.ReadAll(r)
+b, err := io.ReadAll(r)
 if err != nil {
 return fmt.Errorf("failed to read from stdin: %w", err)
 }


@@ -8,7 +8,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "path/filepath"
 "strings"
@@ -309,7 +308,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 logrus.Debugf("layer list: %q", layers)
 // Make a temporary directory to hold blobs.
-path, err := ioutil.TempDir(os.TempDir(), define.Package)
+path, err := os.MkdirTemp(os.TempDir(), define.Package)
 if err != nil {
 return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
 }


@@ -6,7 +6,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "net/http"
 "os"
 "os/exec"
@@ -153,7 +152,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 if err != nil {
 return "", nil, err
 }
-data = ioutil.NopCloser(pData)
+data = io.NopCloser(pData)
 }
 dockerfiles = append(dockerfiles, data)


@@ -5,7 +5,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "sort"
 "strconv"
@@ -58,8 +57,8 @@ var builtinAllowedBuildArgs = map[string]bool{
 // interface. It coordinates the entire build by using one or more
 // StageExecutors to handle each stage of the build.
 type Executor struct {
-cacheFrom reference.Named
-cacheTo reference.Named
+cacheFrom []reference.Named
+cacheTo []reference.Named
 cacheTTL time.Duration
 containerSuffix string
 logger *logrus.Logger
@@ -200,7 +199,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
 writer := options.ReportWriter
 if options.Quiet {
-writer = ioutil.Discard
+writer = io.Discard
 }
 var rusageLogFile io.Writer
@@ -589,7 +588,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 stdout := b.out
 if b.quiet {
-b.out = ioutil.Discard
+b.out = io.Discard
 }
 cleanup := func() error {
@@ -954,7 +953,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 }
 logrus.Debugf("printing final image id %q", imageID)
 if b.iidfile != "" {
-if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
+if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
 return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
 }
 } else {


@@ -1729,7 +1729,9 @@ func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.N
 // cacheImageReference is internal function which generates ImageReference from Named repo sources
 // and a tag.
-func cacheImageReference(repo reference.Named, cachekey string) (types.ImageReference, error) {
+func cacheImageReferences(repos []reference.Named, cachekey string) ([]types.ImageReference, error) {
+var result []types.ImageReference
+for _, repo := range repos {
 tagged, err := reference.WithTag(repo, cachekey)
 if err != nil {
 return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err)
@@ -1738,17 +1740,20 @@ func cacheImageReference(repo reference.Named, cachekey string) (types.ImageRefe
 if err != nil {
 return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err)
 }
-return dest, nil
+result = append(result, dest)
+}
+return result, nil
 }
 // pushCache takes the image id of intermediate image and attempts
 // to perform push at the remote repository with cacheKey as the tag.
 // Returns error if fails otherwise returns nil.
 func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) error {
-dest, err := cacheImageReference(s.executor.cacheTo, cacheKey)
+destList, err := cacheImageReferences(s.executor.cacheTo, cacheKey)
 if err != nil {
 return err
 }
+for _, dest := range destList {
 logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src)
 options := buildah.PushOptions{
 Compression: s.executor.compression,
@@ -1765,6 +1770,7 @@ func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) err
 return fmt.Errorf("failed pushing cache to %q: %w", dest, err)
 }
 logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest)
+}
 return nil
 }
@@ -1775,10 +1781,11 @@ func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) err
 // image was pulled function returns image id otherwise returns empty
 // string "" or error if any error was encontered while pulling the cache.
 func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (string, error) {
-src, err := cacheImageReference(s.executor.cacheFrom, cacheKey)
+srcList, err := cacheImageReferences(s.executor.cacheFrom, cacheKey)
 if err != nil {
 return "", err
 }
+for _, src := range srcList {
 logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference())
 options := buildah.PullOptions{
 SignaturePolicyPath: s.executor.signaturePolicyPath,
@@ -1794,10 +1801,13 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (string,
 id, err := buildah.Pull(ctx, src.DockerReference().String(), options)
 if err != nil {
 logrus.Debugf("failed pulling cache from source %s: %v", src, err)
-return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
+continue // failed pulling this one try next
+//return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
 }
 logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
 return id, nil
+}
+return "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
 }
 // intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
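The loop added to `pullCache` above changes cache pulls from fail-fast to fall-through: each source is tried in order and an error is reported only when every source has failed. A simplified, hypothetical sketch of that pattern; `pullFirstAvailable` and its `pull` callback are stand-ins, not buildah API.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// pullFirstAvailable tries each cache source in order, skips sources that
// fail, and returns the first image ID that could be pulled.
func pullFirstAvailable(ctx context.Context, sources []string, pull func(context.Context, string) (string, error)) (string, error) {
	for _, src := range sources {
		id, err := pull(ctx, src)
		if err != nil {
			// failed pulling this one, try the next source
			continue
		}
		return id, nil
	}
	return "", fmt.Errorf("failed pulling cache from all available sources %q", sources)
}

func main() {
	pull := func(_ context.Context, src string) (string, error) {
		if src == "registry.example.com/cache" { // hypothetical repositories
			return "", errors.New("unreachable")
		}
		return "sha256:cafe", nil
	}
	id, err := pullFirstAvailable(context.Background(),
		[]string{"registry.example.com/cache", "quay.io/example/cache"}, pull)
	fmt.Println(id, err)
}
```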


@@ -188,8 +188,8 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
 // GetCacheMount parses a single cache mount entry from the --mount flag.
 //
-// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, lockfile.Locker, error) {
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, *lockfile.LockFile, error) {
 var err error
 var mode uint64
 var buildahLockFilesDir string
@@ -364,7 +364,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 }
 }
-var targetLock lockfile.Locker // = nil
+var targetLock *lockfile.LockFile // = nil
 succeeded := false
 defer func() {
 if !succeeded && targetLock != nil {
@@ -374,7 +374,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 switch sharing {
 case "locked":
 // lock parent cache
-lockfile, err := lockfile.GetLockfile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
+lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
 if err != nil {
 return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
 }
@@ -497,7 +497,7 @@ func Volume(volume string) (specs.Mount, error) {
 }
 // UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
-func UnlockLockArray(locks []lockfile.Locker) {
+func UnlockLockArray(locks []*lockfile.LockFile) {
 for _, lock := range locks {
 lock.Unlock()
 }
@@ -505,8 +505,8 @@ func UnlockLockArray(locks []lockfile.Locker) {
 // GetVolumes gets the volumes from --volume and --mount
 //
-// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
-func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []lockfile.Locker, error) {
+// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
+func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
 unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir)
 if err != nil {
 return nil, mountedImages, nil, err
@@ -541,13 +541,13 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
 // buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
 // buildah run --mount type=tmpfs,target=/dev/shm ...
 //
-// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
-func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []lockfile.Locker, error) {
+// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
 // If `type` is not set default to "bind"
 mountType := define.TypeBind
 finalMounts := make(map[string]specs.Mount)
 mountedImages := make([]string, 0)
-targetLocks := make([]lockfile.Locker, 0)
+targetLocks := make([]*lockfile.LockFile, 0)
 succeeded := false
 defer func() {
 if !succeeded {
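The vendored code now uses the concrete `*lockfile.LockFile` type and `GetLockFile` from containers/storage instead of the old `lockfile.Locker` interface and `GetLockfile`. A minimal usage sketch of the renamed API as it appears in the hunk above; the lock path is hypothetical.

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// GetLockFile returns a *lockfile.LockFile; the caller is responsible for
	// unlocking it once the cache mount has been prepared.
	lock, err := lockfile.GetLockFile(filepath.Join("/tmp", "buildah-cache.lock")) // hypothetical path
	if err != nil {
		panic(err)
	}
	lock.Lock()
	defer lock.Unlock()
	fmt.Println("cache mount is locked while it is populated")
}
```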


@@ -8,7 +8,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "path/filepath"
 "strings"
@@ -301,18 +300,18 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
 iopts.Quiet = true
 }
 }
-var cacheTo reference.Named
+var cacheTo []reference.Named
-var cacheFrom reference.Named
+var cacheFrom []reference.Named
 cacheTo = nil
 cacheFrom = nil
 if c.Flag("cache-to").Changed {
-cacheTo, err = parse.RepoNameToNamedReference(iopts.CacheTo)
+cacheTo, err = parse.RepoNamesToNamedReferences(iopts.CacheTo)
 if err != nil {
 return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err)
 }
 }
 if c.Flag("cache-from").Changed {
-cacheFrom, err = parse.RepoNameToNamedReference(iopts.CacheFrom)
+cacheFrom, err = parse.RepoNamesToNamedReferences(iopts.CacheFrom)
 if err != nil {
 return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheTo, err)
 }
@@ -423,7 +422,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
 UnsetEnvs: iopts.UnsetEnvs,
 }
 if iopts.Quiet {
-options.ReportWriter = ioutil.Discard
+options.ReportWriter = io.Discard
 }
 return options, containerfiles, removeAll, nil
 }


@@ -53,8 +53,8 @@ type BudResults struct {
 Authfile string
 BuildArg []string
 BuildContext []string
-CacheFrom string
-CacheTo string
+CacheFrom []string
+CacheTo []string
 CacheTTL string
 CertDir string
 Compress bool
@@ -202,8 +202,8 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
 fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
 fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
 fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
-fs.StringVar(&flags.CacheFrom, "cache-from", "", "remote repository to utilise as potential cache source.")
-fs.StringVar(&flags.CacheTo, "cache-to", "", "remote repository to utilise as potential cache destination.")
+fs.StringArrayVar(&flags.CacheFrom, "cache-from", []string{}, "remote repository list to utilise as potential cache source.")
+fs.StringArrayVar(&flags.CacheTo, "cache-to", []string{}, "remote repository list to utilise as potential cache destination.")
 fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.")
 fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
 fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
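Because the flags are now registered with `StringArrayVar`, `--cache-from`/`--cache-to` can be passed multiple times and each occurrence is collected into the slice. A small standalone sketch with pflag; the repository names are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Roughly what the CLI does, e.g.
	//   buildah build --cache-from quay.io/example/a --cache-from quay.io/example/b .
	var cacheFrom []string
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	fs.StringArrayVar(&cacheFrom, "cache-from", []string{}, "remote repository list to utilise as potential cache source.")

	_ = fs.Parse([]string{"--cache-from", "quay.io/example/a", "--cache-from", "quay.io/example/b"})
	fmt.Println(cacheFrom) // [quay.io/example/a quay.io/example/b]
}
```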


@@ -2,7 +2,6 @@ package overlay
 import (
 "fmt"
-"io/ioutil"
 "os"
 "os/exec"
 "path/filepath"
@@ -60,7 +59,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
 return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
 }
-contentDir, err := ioutil.TempDir(contentDir, "")
+contentDir, err := os.MkdirTemp(contentDir, "")
 if err != nil {
 return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
 }
@@ -291,7 +290,7 @@ func CleanupMount(contentDir string) (Err error) {
 func CleanupContent(containerDir string) (Err error) {
 contentDir := filepath.Join(containerDir, "overlay")
-files, err := ioutil.ReadDir(contentDir)
+files, err := os.ReadDir(contentDir)
 if err != nil {
 if errors.Is(err, os.ErrNotExist) {
 return nil


@@ -16,6 +16,7 @@ import (
 "github.com/containerd/containerd/platforms"
 "github.com/containers/buildah/define"
+securejoin "github.com/cyphar/filepath-securejoin"
 internalParse "github.com/containers/buildah/internal/parse"
 "github.com/containers/buildah/pkg/sshagent"
 "github.com/containers/common/pkg/config"
@@ -50,8 +51,10 @@ const (
 BuildahCacheDir = "buildah-cache"
 )
-// RepoNameToNamedReference parse the raw string to Named reference
-func RepoNameToNamedReference(dest string) (reference.Named, error) {
+// RepoNamesToNamedReferences parse the raw string to Named reference
+func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
+var result []reference.Named
+for _, dest := range destList {
 named, err := reference.ParseNormalizedNamed(dest)
 if err != nil {
 return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err)
@@ -59,7 +62,9 @@ func RepoNameToNamedReference(dest string) (reference.Named, error) {
 if !reference.IsNameOnly(named) {
 return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named)
 }
-return named, nil
+result = append(result, named)
+}
+return result, nil
 }
 // CommonBuildOptions parses the build options from the bud cli
@@ -1103,10 +1108,16 @@ func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]st
 return excludes, containerfileIgnore, err
 }
 }
-path = filepath.Join(contextDir, ".containerignore")
+path, symlinkErr := securejoin.SecureJoin(contextDir, ".containerignore")
+if symlinkErr != nil {
+return nil, "", symlinkErr
+}
 excludes, err := imagebuilder.ParseIgnore(path)
 if errors.Is(err, os.ErrNotExist) {
-path = filepath.Join(contextDir, ".dockerignore")
+path, symlinkErr = securejoin.SecureJoin(contextDir, ".dockerignore")
+if symlinkErr != nil {
+return nil, "", symlinkErr
+}
 excludes, err = imagebuilder.ParseIgnore(path)
 }
 if errors.Is(err, os.ErrNotExist) {
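The ignore-file lookup above switches from `filepath.Join` to `securejoin.SecureJoin`, so a symlinked `.containerignore`/`.dockerignore` cannot resolve to a path outside the build context. A minimal sketch of that call; the context directory is hypothetical.

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// SecureJoin resolves the second argument relative to the first while
	// refusing to let symlinks escape it, unlike a plain filepath.Join.
	path, err := securejoin.SecureJoin("/var/tmp/build-context", ".containerignore") // hypothetical context dir
	if err != nil {
		panic(err)
	}
	fmt.Println(path)
}
```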


@@ -4,7 +4,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "net"
 "os"
 "path/filepath"
@@ -80,7 +79,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) {
 if err != nil {
 return "", err
 }
-serveDir, err := ioutil.TempDir("", ".buildah-ssh-sock")
+serveDir, err := os.MkdirTemp("", ".buildah-ssh-sock")
 if err != nil {
 return "", err
 }
@@ -223,7 +222,7 @@ func NewSource(paths []string) (*Source, error) {
 if err != nil {
 return nil, err
 }
-dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
+dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
 if err != nil {
 return nil, err
 }


@@ -3,12 +3,12 @@ package util
 import (
 "bytes"
 "errors"
-"io/ioutil"
 "time"
+"os"
 )
 func ReadUptime() (time.Duration, error) {
-buf, err := ioutil.ReadFile("/proc/uptime")
+buf, err := os.ReadFile("/proc/uptime")
 if err != nil {
 return 0, err
 }


@@ -2,7 +2,6 @@ package util
 import (
 "fmt"
-"io/ioutil"
 "os"
 "path/filepath"
 "strings"
@@ -21,12 +20,12 @@ func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) {
 if !strings.HasPrefix(file, "/dev/fd") {
 return file, false
 }
-b, err := ioutil.ReadFile(file)
+b, err := os.ReadFile(file)
 if err != nil {
 // if anything goes wrong return original path
 return file, false
 }
-tmpfile, err := ioutil.TempFile(os.TempDir(), "buildah-temp-file")
+tmpfile, err := os.CreateTemp(os.TempDir(), "buildah-temp-file")
 if err != nil {
 return file, false
 }


@@ -178,7 +178,7 @@ type runMountArtifacts struct {
 // SSHAuthSock is the path to the ssh auth sock inside the container
 SSHAuthSock string
 // TargetLocks to be unlocked if there are any.
-TargetLocks []lockfile.Locker
+TargetLocks []*lockfile.LockFile
 }
 // RunMountInfo are the available run mounts for this run


@@ -9,7 +9,6 @@ import (
 "errors"
 "fmt"
 "io"
-"io/ioutil"
 "net"
 "os"
 "os/exec"
@@ -556,7 +555,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
 }()
 // Make sure we read the container's exit status when it exits.
-pidValue, err := ioutil.ReadFile(pidFile)
+pidValue, err := os.ReadFile(pidFile)
 if err != nil {
 return 1, err
 }
@@ -1185,7 +1184,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 logrus.Errorf("did not get container create message from subprocess: %v", err)
 } else {
 pidFile := filepath.Join(bundlePath, "pid")
-pidValue, err := ioutil.ReadFile(pidFile)
+pidValue, err := os.ReadFile(pidFile)
 if err != nil {
 return err
 }
@@ -1199,7 +1198,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 defer teardown()
 }
 if err != nil {
-return err
+return fmt.Errorf("setup network: %w", err)
 }
 // only add hosts if we manage the hosts file
@@ -1464,7 +1463,7 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
 agents := make([]*sshagent.AgentServer, 0, len(mounts))
 sshCount := 0
 defaultSSHSock := ""
-targetLocks := []lockfile.Locker{}
+targetLocks := []*lockfile.LockFile{}
 succeeded := false
 defer func() {
 if !succeeded {
@@ -1655,7 +1654,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 switch secr.SourceType {
 case "env":
 data = []byte(os.Getenv(secr.Source))
-tmpFile, err := ioutil.TempFile(define.TempDir, "buildah*")
+tmpFile, err := os.CreateTemp(define.TempDir, "buildah*")
 if err != nil {
 return nil, "", err
 }
@@ -1666,7 +1665,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 if err != nil {
 return nil, "", err
 }
-data, err = ioutil.ReadFile(secr.Source)
+data, err = os.ReadFile(secr.Source)
 if err != nil {
 return nil, "", err
 }
@@ -1680,7 +1679,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
 return nil, "", err
 }
-if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
+if err := os.WriteFile(ctrFileOnHost, data, 0644); err != nil {
 return nil, "", err
 }


@@ -6,7 +6,6 @@ package buildah
 import (
 "errors"
 "fmt"
-"io/ioutil"
 "os"
 "os/exec"
 "path/filepath"
@@ -72,7 +71,7 @@ func setChildProcess() error {
 }
 func (b *Builder) Run(command []string, options RunOptions) error {
-p, err := ioutil.TempDir("", Package)
+p, err := os.MkdirTemp("", Package)
 if err != nil {
 return err
 }
@@ -305,8 +304,8 @@ func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Moun
 return spec.Mounts, nil
 }
-// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, lockfile.Locker, error) {
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, *lockfile.LockFile, error) {
 return nil, nil, errors.New("cache mounts not supported on freebsd")
 }


@@ -7,7 +7,6 @@ import (
 "context"
 "errors"
 "fmt"
-"io/ioutil"
 "os"
 "os/exec"
 "path/filepath"
@@ -70,7 +69,7 @@ func setChildProcess() error {
 // Run runs the specified command in the container's root filesystem.
 func (b *Builder) Run(command []string, options RunOptions) error {
-p, err := ioutil.TempDir("", define.Package)
+p, err := os.MkdirTemp("", define.Package)
 if err != nil {
 return err
 }
@@ -480,7 +479,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
 defer rootlessSlirpSyncR.Close()
 // Be sure there are no fds inherited to slirp4netns except the sync pipe
-files, err := ioutil.ReadDir("/proc/self/fd")
+files, err := os.ReadDir("/proc/self/fd")
 if err != nil {
 return nil, fmt.Errorf("cannot list open fds: %w", err)
 }
@@ -1199,8 +1198,8 @@ func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool {
 return false
 }
-// If this function succeeds and returns a non-nil lockfile.Locker, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, lockfile.Locker, error) {
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, *lockfile.LockFile, error) {
 var optionMounts []specs.Mount
 mount, targetLock, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
 if err != nil {


@@ -5,7 +5,7 @@ package buildah
 import (
 "fmt"
-"io/ioutil"
+"os"
 "github.com/containers/common/pkg/seccomp"
 "github.com/opencontainers/runtime-spec/specs-go"
@@ -22,7 +22,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
 }
 spec.Linux.Seccomp = seccompConfig
 default:
-seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
+seccompProfile, err := os.ReadFile(seccompProfilePath)
 if err != nil {
 return fmt.Errorf("opening seccomp profile failed: %w", err)
 }


@@ -441,7 +441,14 @@ func (m byDestination) Len() int {
 }
 func (m byDestination) Less(i, j int) bool {
-return m.parts(i) < m.parts(j)
+iparts, jparts := m.parts(i), m.parts(j)
+switch {
+case iparts < jparts:
+return true
+case iparts > jparts:
+return false
+}
+return filepath.Clean(m[i].Destination) < filepath.Clean(m[j].Destination)
 }
 func (m byDestination) Swap(i, j int) {
@@ -453,7 +460,7 @@ func (m byDestination) parts(i int) int {
 }
 func SortMounts(m []specs.Mount) []specs.Mount {
 sort.Sort(byDestination(m))
-sort.Sort(byDestination(m))
+sort.Stable(byDestination(m))
 return m
 }
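The mount-sorting change above switches to `sort.Stable` and adds a tie-breaker on the cleaned destination path, so mounts at the same depth come out in a deterministic order. A self-contained sketch of that comparison; note that the `parts` helper here simply counts path separators and is an assumption, not necessarily the vendored implementation.

```go
package main

import (
	"fmt"
	"path/filepath"
	"sort"
	"strings"
)

type mount struct{ Destination string }

type byDestination []mount

func (m byDestination) Len() int      { return len(m) }
func (m byDestination) Swap(i, j int) { m[i], m[j] = m[j], m[i] }

// parts approximates the depth of a destination by counting separators.
func (m byDestination) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(filepath.Separator))
}

func (m byDestination) Less(i, j int) bool {
	// Primary key: path depth; secondary key: the cleaned path itself.
	iparts, jparts := m.parts(i), m.parts(j)
	if iparts != jparts {
		return iparts < jparts
	}
	return filepath.Clean(m[i].Destination) < filepath.Clean(m[j].Destination)
}

func main() {
	mounts := []mount{{"/proc/sys"}, {"/dev"}, {"/proc"}, {"/dev/shm"}}
	sort.Stable(byDestination(mounts))
	fmt.Println(mounts) // [{/dev} {/proc} {/dev/shm} {/proc/sys}]
}
```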

vendor/modules.txt

@@ -95,7 +95,7 @@ github.com/containernetworking/cni/pkg/version
 # github.com/containernetworking/plugins v1.1.1
 ## explicit; go 1.17
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f
+# github.com/containers/buildah v1.28.1-0.20221219201600-ca578b290144
 ## explicit; go 1.17
 github.com/containers/buildah
 github.com/containers/buildah/bind