vendor: update common and buildah

vendor the following dependencies:

- https://github.com/containers/common/pull/2375
- https://github.com/containers/buildah/pull/6074

Closes: https://github.com/containers/podman/issues/25634

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
This commit is contained in:
Giuseppe Scrivano
2025-03-20 11:57:58 +01:00
parent 94e77af09d
commit 260035d069
49 changed files with 566 additions and 639 deletions

View File

@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "release-1.39"
DEST_BRANCH: "main"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
GOCACHE: "/tmp/go-build"
@ -33,7 +33,7 @@ env:
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
IMAGE_SUFFIX: "c20250131t121915z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -50,7 +50,7 @@ env:
gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7]
# Default timeout for each task
timeout_in: 120m
timeout_in: 30m
# Default VM to use unless set or modified by task
gce_instance: &standardvm
@ -95,12 +95,12 @@ smoke_task:
gce_instance:
memory: "12G"
cpu: 4
cpu: 8
# Don't bother running on branches (including cron), or for tags.
skip: $CIRRUS_PR == ''
timeout_in: 30m
timeout_in: 10m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
@ -122,7 +122,7 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
image: docker.io/library/golang:1.22
image: docker.io/library/golang:1.23
cpu: 1
memory: 1
@ -154,6 +154,8 @@ cross_build_task:
unit_task:
name: 'Unit tests w/ $STORAGE_DRIVER'
gce_instance:
cpu: 4
alias: unit
skip: &not_build_docs >-
$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' ||
@ -162,8 +164,6 @@ unit_task:
- smoke
- vendor
timeout_in: 90m
matrix:
- env:
STORAGE_DRIVER: 'vfs'
@ -181,10 +181,9 @@ conformance_task:
depends_on: *smoke_vendor
gce_instance:
cpu: 4
image_name: "${DEBIAN_CACHE_IMAGE_NAME}"
timeout_in: 65m
matrix:
- env:
STORAGE_DRIVER: 'vfs'
@ -208,7 +207,7 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# Disabled until we update to f41/42 as f40 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
@ -222,7 +221,7 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# Disabled until we update to f41/42 as f40 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
@ -234,7 +233,8 @@ integration_task:
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 4
cpu: 8
memory: "8G"
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@ -282,7 +282,8 @@ integration_rootless_task:
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 4
cpu: 8
memory: "8G"
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@ -302,7 +303,8 @@ in_podman_task:
depends_on: *smoke_vendor
gce_instance:
cpu: 4
cpu: 8
memory: "8G"
env:
# This is key, cause the scripts to re-execute themselves inside a container.

View File

@ -5,6 +5,27 @@
downstream_package_name: buildah
upstream_tag_template: v{version}
# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
# propose-downstream job. This is done so tests maintained upstream can be run
# downstream in Zuul CI and Bodhi.
# Ref: https://packit.dev/docs/configuration#files_to_sync
files_to_sync:
- src: rpm/gating.yaml
dest: gating.yaml
delete: true
- src: plans/
dest: plans/
delete: true
mkpath: true
- src: tests/tmt/
dest: tests/tmt/
delete: true
mkpath: true
- src: .fmf/
dest: .fmf/
delete: true
- .packit.yaml
packages:
buildah-fedora:
pkg_tool: fedpkg
@ -25,9 +46,13 @@ jobs:
notifications: &copr_build_failure_notification
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
targets:
- fedora-all-x86_64
- fedora-all-aarch64
targets: &fedora_copr_targets
# f40 ships go 1.22 and we require go 1.23 now. This should be reverted to fedora-all
# once either f40 is rebased to go 1.23 or f42 is released and f40 is EOL.
- fedora-latest-stable-x86_64
- fedora-latest-stable-aarch64
- fedora-development-x86_64
- fedora-development-aarch64
enable_net: true
- job: copr_build
@ -47,7 +72,7 @@ jobs:
trigger: pull_request
packages: [buildah-centos]
notifications: *copr_build_failure_notification
targets:
targets: &centos_copr_targets
- centos-stream-9-x86_64
- centos-stream-9-aarch64
- centos-stream-10-x86_64
@ -66,6 +91,33 @@ jobs:
project: podman-next
enable_net: true
# Tests on Fedora for main branch PRs
- job: tests
trigger: pull_request
packages: [buildah-fedora]
targets: &fedora_copr_test_targets
# See the other comment above, this should be reverted to fedora-all when possible.
- fedora-latest-stable-x86_64
- fedora-development-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo
# Tests on CentOS Stream for main branch PRs
- job: tests
trigger: pull_request
packages: [buildah-centos]
targets: &centos_copr_test_targets
- centos-stream-9-x86_64
- centos-stream-10-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo
# Sync to Fedora
- job: propose_downstream
trigger: release

View File

@ -2,15 +2,6 @@
# Changelog
## v1.39.2 (2025-03-03)
[release-1.39] Bump c/image to v5.34.1, c/common v0.62.1
## v1.39.1 (2025-02-25)
chroot createPlatformContainer: use MS_REMOUNT
chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
## v1.39.0 (2025-01-31)
Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0

View File

@ -1,3 +1,3 @@
## The Buildah Project Community Code of Conduct
The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The Buildah Project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

View File

@ -22,9 +22,10 @@ STRIP ?= strip
GO := go
GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
NPROCS := $(shell nproc)
export GO_BUILD=$(GO) build
export GO_TEST=$(GO) test
RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
export GO_TEST=$(GO) test -parallel=$(NPROCS)
RACEFLAGS ?= $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})

View File

@ -12,6 +12,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -94,6 +95,8 @@ type AddAndCopyOptions struct {
// RetryDelay is how long to wait before retrying attempts to retrieve
// remote contents.
RetryDelay time.Duration
// Parents preserve parent directories of source content
Parents bool
}
// gitURLFragmentSuffix matches fragments to use as Git reference and build
@ -263,6 +266,25 @@ func globbedToGlobbable(glob string) string {
return result
}
// getParentsPrefixToRemoveAndParentsToSkip locates the "pivot point" in
// pattern — the path component named "." (i.e. where "/./" occurs after
// contextDir is stripped) — and returns the cleaned prefix that precedes it,
// together with the list of that prefix's ancestor components (deepest
// first) that should be skipped. When no "/./" is present, it returns the
// path separator ("/") and an empty list.
func getParentsPrefixToRemoveAndParentsToSkip(pattern string, contextDir string) (string, []string) {
	relative := strings.TrimPrefix(pattern, contextDir)
	before, _, hasPivot := strings.Cut(relative, "/./")
	if !hasPivot {
		return string(filepath.Separator), []string{}
	}
	sep := string(filepath.Separator)
	cleaned := strings.TrimPrefix(filepath.Clean(sep+before), sep)
	skip := []string{}
	// Collect every ancestor of the cleaned prefix, including the prefix
	// itself, stopping before the root ("/") and the current dir (".").
	for p := cleaned; p != "/" && p != "."; p = filepath.Dir(p) {
		skip = append(skip, p)
	}
	return cleaned, skip
}
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
@ -476,7 +498,6 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
return fmt.Errorf("ensuring target directory exists: %w", err)
}
// Copy each source in turn.
for _, src := range sources {
var multiErr *multierror.Error
@ -587,7 +608,6 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if localSourceStat == nil {
continue
}
// Iterate through every item that matched the glob.
itemsCopied := 0
for _, globbed := range localSourceStat.Globbed {
@ -640,6 +660,25 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return false, false, nil
})
}
if options.Parents {
parentsPrefixToRemove, parentsToSkip := getParentsPrefixToRemoveAndParentsToSkip(src, options.ContextDir)
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
if slices.Contains(parentsToSkip, hdr.Name) && hdr.Typeflag == tar.TypeDir {
return true, false, nil
}
hdr.Name = strings.TrimPrefix(hdr.Name, parentsPrefixToRemove)
hdr.Name = strings.TrimPrefix(hdr.Name, "/")
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, parentsPrefixToRemove)
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "/")
}
if hdr.Name == "" {
return true, false, nil
}
return false, false, nil
})
}
writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) {
itemsCopied++
return false, false, nil
@ -656,6 +695,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Parents: options.Parents,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
closeErr = writer.Close()

View File

@ -1,11 +1,3 @@
- Changelog for v1.39.2 (2025-03-03)
* [release-1.39] Bump c/image to v5.34.1, c/common v0.62.1
- Changelog for v1.39.1 (2025-02-25)
* chroot createPlatformContainer: use MS_REMOUNT
* chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
- Changelog for v1.39.0 (2025-01-31)
* Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
* Update module github.com/containers/storage to v1.57.0

View File

@ -6,11 +6,11 @@ import (
"fmt"
"maps"
"os"
"runtime"
"slices"
"strings"
"time"
"github.com/containerd/platforms"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
internalUtil "github.com/containers/buildah/internal/util"
@ -137,18 +137,19 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
b.OCIv1.Created = &now
}
currentPlatformSpecification := platforms.DefaultSpec()
if b.OS() == "" {
if sys != nil && sys.OSChoice != "" {
b.SetOS(sys.OSChoice)
} else {
b.SetOS(runtime.GOOS)
b.SetOS(currentPlatformSpecification.OS)
}
}
if b.Architecture() == "" {
if sys != nil && sys.ArchitectureChoice != "" {
b.SetArchitecture(sys.ArchitectureChoice)
} else {
b.SetArchitecture(runtime.GOARCH)
b.SetArchitecture(currentPlatformSpecification.Architecture)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
@ -158,6 +159,8 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
if b.Variant() == "" {
if sys != nil && sys.VariantChoice != "" {
b.SetVariant(sys.VariantChoice)
} else {
b.SetVariant(currentPlatformSpecification.Variant)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})

View File

@ -13,6 +13,7 @@ import (
"os/user"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -350,6 +351,7 @@ type GetOptions struct {
ChmodDirs *os.FileMode // set permissions on directories. no effect on archives being extracted
ChownFiles *idtools.IDPair // set ownership of files. no effect on archives being extracted
ChmodFiles *os.FileMode // set permissions on files. no effect on archives being extracted
Parents bool // maintain the sources parent directory in the destination
StripSetuidBit bool // strip the setuid bit off of items being copied. no effect on archives being extracted
StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted
StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted
@ -1182,6 +1184,49 @@ func errorIsPermission(err error) bool {
return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
}
// getParents returns the ancestor directories of path, ordered from the
// one nearest stopPath down to path's immediate parent. The walk ends at
// "/", ".", or stopPath, and stopPath itself is never included in the
// result.
func getParents(path string, stopPath string) []string {
	ancestors := []string{}
	for cur := path; cur != "/" && cur != "." && cur != stopPath; {
		cur = filepath.Dir(cur)
		if cur != stopPath {
			ancestors = append(ancestors, cur)
		}
	}
	// Ancestors were gathered deepest-first; callers expect root-first.
	slices.Reverse(ancestors)
	return ancestors
}
// checkLinks dereferences item if it is a symlink, chasing chains of links
// (up to maxFollowedLinks deep) until it reaches a non-link target, and
// returns the resolved path, its Lstat info, and any error. Resolution is a
// no-op when NoDerefSymlinks is set or item is not a symlink. Absolute link
// targets are re-rooted under req.Root; too-deep chains fail with ELOOP.
func checkLinks(item string, req request, info os.FileInfo) (string, os.FileInfo, error) {
	// chase links. if we hit a dead end, we should just fail
	oldItem := item
	followedLinks := 0
	const maxFollowedLinks = 16
	for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
		path, err := os.Readlink(item)
		if err != nil {
			// Fail instead of retrying: nothing in the loop state changes
			// between iterations, so a `continue` here would re-run the
			// identical Readlink call forever on a persistent error.
			return "", nil, fmt.Errorf("copier: get: readlink %q(%q): %w", oldItem, item, err)
		}
		if filepath.IsAbs(path) || looksLikeAbs(path) {
			// Absolute targets are interpreted relative to the chroot root.
			path = filepath.Join(req.Root, path)
		} else {
			path = filepath.Join(filepath.Dir(item), path)
		}
		item = path
		// Make sure the resolved target still lives under req.Root.
		if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
			return "", nil, fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", oldItem, item, req.Root, err)
		}
		if info, err = os.Lstat(item); err != nil {
			return "", nil, fmt.Errorf("copier: get: lstat %q(%q): %w", oldItem, item, err)
		}
		followedLinks++
	}
	if followedLinks >= maxFollowedLinks {
		return "", nil, fmt.Errorf("copier: get: resolving symlink %q(%q): %w", oldItem, item, syscall.ELOOP)
	}
	return item, info, nil
}
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
statRequest := req
statRequest.Request = requestStat
@ -1196,15 +1241,25 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
return errorResponse("copier: get: expected at least one glob pattern, got 0")
}
// build a queue of items by globbing
var queue []string
type queueItem struct {
glob string
parents []string
}
var queue []queueItem
globMatchedCount := 0
for _, glob := range req.Globs {
globMatched, err := extendedGlob(glob)
if err != nil {
return errorResponse("copier: get: glob %q: %v", glob, err)
}
globMatchedCount += len(globMatched)
queue = append(queue, globMatched...)
for _, path := range globMatched {
var parents []string
if req.GetOptions.Parents {
parents = getParents(path, req.Directory)
}
globMatchedCount++
queue = append(queue, queueItem{glob: path, parents: parents})
}
}
// no matches -> error
if len(queue) == 0 {
@ -1219,7 +1274,9 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
defer tw.Close()
hardlinkChecker := new(hardlinkChecker)
itemsCopied := 0
for i, item := range queue {
addedParents := map[string]struct{}{}
for i, qItem := range queue {
item := qItem.glob
// if we're not discarding the names of individual directories, keep track of this one
relNamePrefix := ""
if req.GetOptions.KeepDirectoryNames {
@ -1230,31 +1287,53 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if err != nil {
return fmt.Errorf("copier: get: lstat %q: %w", item, err)
}
// chase links. if we hit a dead end, we should just fail
followedLinks := 0
const maxFollowedLinks = 16
for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
path, err := os.Readlink(item)
if req.GetOptions.Parents && info.Mode().IsDir() {
if !slices.Contains(qItem.parents, item) {
qItem.parents = append(qItem.parents, item)
}
}
// Copy parents in to tarball first if exists
for _, parent := range qItem.parents {
oldParent := parent
parentInfo, err := os.Lstat(parent)
if err != nil {
return fmt.Errorf("copier: get: lstat %q: %w", parent, err)
}
parent, parentInfo, err = checkLinks(parent, req, parentInfo)
if err != nil {
return err
}
parentName, err := convertToRelSubdirectory(req.Directory, oldParent)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", parent, req.Directory, err)
}
if parentName == "" || parentName == "." {
// skip the "." entry
continue
}
if filepath.IsAbs(path) || looksLikeAbs(path) {
path = filepath.Join(req.Root, path)
} else {
path = filepath.Join(filepath.Dir(item), path)
if _, ok := addedParents[parentName]; ok {
continue
}
item = path
if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
addedParents[parentName] = struct{}{}
if err := copierHandlerGetOne(parentInfo, "", parentName, parent, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
continue
} else if errors.Is(err, os.ErrNotExist) {
logrus.Warningf("copier: file disappeared while reading: %q", parent)
return nil
}
return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
}
if info, err = os.Lstat(item); err != nil {
return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
}
followedLinks++
itemsCopied++
}
if followedLinks >= maxFollowedLinks {
return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
item, info, err = checkLinks(item, req, info)
if err != nil {
return err
}
// evaluate excludes relative to the root directory
if info.Mode().IsDir() {
// we don't expand any of the contents that are archives
@ -1354,6 +1433,12 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
ok = filepath.SkipDir
}
}
if req.GetOptions.Parents {
rel, err = convertToRelSubdirectory(req.Directory, path)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", path, req.Root, err)
}
}
// add the item to the outgoing tar stream
if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
@ -1368,7 +1453,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
// walk the directory tree, checking/adding items individually
if err := filepath.WalkDir(item, walkfn); err != nil {
return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
return fmt.Errorf("copier: get: %q(%q): %w", queue[i].glob, item, err)
}
itemsCopied++
} else {
@ -1379,15 +1464,24 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if skip {
continue
}
// add the item to the outgoing tar stream. in
// cases where this was a symlink that we
// dereferenced, be sure to use the name of the
// link.
if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
name := filepath.Base(queue[i].glob)
if req.GetOptions.Parents {
name, err = convertToRelSubdirectory(req.Directory, queue[i].glob)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", item, req.Root, err)
}
if name == "" || name == "." {
// skip the "." entry
continue
}
}
if err := copierHandlerGetOne(info, "", name, item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
continue
}
return fmt.Errorf("copier: get: %q: %w", queue[i], err)
return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
}
itemsCopied++
}

View File

@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.39.2"
Version = "1.40.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"

View File

@ -28,6 +28,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/shortnames"
istorage "github.com/containers/image/v5/storage"
@ -92,12 +93,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
logger.SetLevel(logrus.GetLevel())
var dockerfiles []io.ReadCloser
defer func(dockerfiles ...io.ReadCloser) {
for _, d := range dockerfiles {
d.Close()
}
}(dockerfiles...)
var dockerfiles []io.Reader
for _, tag := range append([]string{options.Output}, options.AdditionalTags...) {
if tag == "" {
@ -109,7 +105,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
for _, dfile := range paths {
var data io.ReadCloser
var data io.Reader
if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
logger.Debugf("reading remote Dockerfile %q", dfile)
@ -117,8 +113,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
if resp.ContentLength == 0 {
resp.Body.Close()
return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = resp.Body
@ -145,13 +141,12 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, fmt.Errorf("reading build instructions: %w", err)
}
defer contents.Close()
dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
}
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close()
return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = contents
@ -163,7 +158,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, err
}
data = io.NopCloser(pData)
data = pData
}
dockerfiles = append(dockerfiles, data)
@ -369,7 +364,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
return "", nil, err
}
defer imgSource.Close()
manifestBytes, _, err := imgSource.GetManifest(ctx, nil)
manifestBytes, _, err := image.UnparsedInstance(imgSource, nil).Manifest(ctx)
if err != nil {
return "", nil, err
}
@ -430,6 +425,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
builtinArgDefaults["TARGETVARIANT"] = defaultPlatform.Variant
builtinArgDefaults["TARGETARCH"] = defaultPlatform.Architecture
builtinArgDefaults["TARGETPLATFORM"] = defaultPlatform.OS + "/" + defaultPlatform.Architecture
builtinArgDefaults["TARGETPLATFORM"] = defaultPlatform.OS + "/" + defaultPlatform.Architecture
if defaultPlatform.Variant != "" {
builtinArgDefaults["TARGETPLATFORM"] += "/" + defaultPlatform.Variant
}
@ -453,6 +449,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
for k, v := range builtinArgDefaults {
b.BuiltinArgDefaults[k] = v
}
defaultContainerConfig, err := config.Default()
if err != nil {
return "", nil, fmt.Errorf("failed to get container config: %w", err)
@ -567,7 +564,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
continue
}
candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
candidateBytes, candidateType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
_ = src.Close()
if err != nil {
logrus.Debugf("reading image manifest for %q: %v", baseImage, err)

View File

@ -368,9 +368,6 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
if cp.Link {
return errors.New("COPY --link is not supported")
}
if cp.Parents {
return errors.New("COPY --parents is not supported")
}
if len(cp.Excludes) > 0 {
excludes = append(slices.Clone(excludes), cp.Excludes...)
}
@ -427,7 +424,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
data = strings.TrimPrefix(data, "\n")
// add breakline when heredoc ends for docker compat
data = data + "\n"
tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name))))
// Create a separate subdir for this file.
tmpDir, err := os.MkdirTemp(parse.GetTempDir(), "buildah-heredoc")
if err != nil {
return fmt.Errorf("unable to create tmp dir for heredoc run %q: %w", parse.GetTempDir(), err)
}
defer os.RemoveAll(tmpDir)
tmpFile, err := os.Create(filepath.Join(tmpDir, path.Base(filepath.ToSlash(file.Name))))
if err != nil {
return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
}
@ -442,7 +445,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
tmpFile.Close()
return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
}
copySources = append(copySources, filepath.Base(tmpFile.Name()))
copySources = append(copySources, filepath.Join(filepath.Base(tmpDir), filepath.Base(tmpFile.Name())))
tmpFile.Close()
}
contextDir = parse.GetTempDir()
@ -554,7 +557,17 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
return fmt.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
// filepath.Join clean path so /./ is removed
if _, suffix, found := strings.Cut(src, "/./"); found && copy.Parents {
fullPath := filepath.Join(contextDir, src)
suffix = filepath.Clean(suffix)
prefix := strings.TrimSuffix(fullPath, suffix)
prefix = filepath.Clean(prefix)
src = prefix + "/./" + suffix
} else {
src = filepath.Join(contextDir, src)
}
sources = append(sources, src)
}
}
options := buildah.AddAndCopyOptions{
@ -575,6 +588,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
Parents: copy.Parents,
}
if len(copy.Files) > 0 {
// If we are copying heredoc files, we need to temporary place
@ -1937,17 +1951,20 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
if len(node.Original) > 4 {
shArg = node.Original[4:]
}
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + shArg + appendCheckSum, nil
}
result := "/bin/sh -c " + shArg
heredoc := ""
result := ""
if len(node.Heredocs) > 0 {
for _, doc := range node.Heredocs {
heredocContent := strings.TrimSpace(doc.Content)
result = result + "\n" + heredocContent
heredoc = heredoc + "\n" + heredocContent
}
}
return result + appendCheckSum, nil
if buildArgs != "" {
result = result + "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " "
}
result = result + "/bin/sh -c " + shArg + heredoc + appendCheckSum
return result, nil
case "ADD", "COPY":
destination := node
for destination.Next != nil {

View File

@ -39,7 +39,8 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
defer src.Close()
imageDigest := ""
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
unparsedTop := image.UnparsedInstance(src, nil)
manifestBytes, manifestType, err := unparsedTop.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
}
@ -48,6 +49,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
}
var instanceDigest *digest.Digest
unparsedInstance := unparsedTop // for instanceDigest
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
@ -58,9 +60,10 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
unparsedInstance = image.UnparsedInstance(src, instanceDigest)
}
image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
image, err := image.FromUnparsedImage(ctx, systemContext, unparsedInstance)
if err != nil {
return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}

View File

@ -21,9 +21,9 @@ import (
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/luksy"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"

View File

@ -557,14 +557,19 @@ func GetCacheMount(sys *types.SystemContext, args []string, store storage.Store,
return newMount, "", "", "", nil, fmt.Errorf("unable to create build cache directory: %w", err)
}
ownerInfo := fmt.Sprintf(":%d:%d", uid, gid)
if id != "" {
// Don't let the user control where we place the directory.
dirID := digest.FromString(id).Encoded()[:16]
// Don't let the user try to inject pathname components by directly using
// the ID when constructing the cache directory location; distinguish
// between caches by ID and ownership
dirID := digest.FromString(id + ownerInfo).Encoded()[:16]
thisCacheRoot = filepath.Join(cacheParent, dirID)
buildahLockFilesDir = filepath.Join(cacheParent, BuildahCacheLockfileDir, dirID)
} else {
// Don't let the user control where we place the directory.
dirID := digest.FromString(newMount.Destination).Encoded()[:16]
// Don't let the user try to inject pathname components by directly using
// the target path when constructing the cache directory location;
// distinguish between caches by mount target location and ownership
dirID := digest.FromString(newMount.Destination + ownerInfo).Encoded()[:16]
thisCacheRoot = filepath.Join(cacheParent, dirID)
buildahLockFilesDir = filepath.Join(cacheParent, BuildahCacheLockfileDir, dirID)
}

View File

@ -195,7 +195,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
return nil, fmt.Errorf("instantiating image for %q: %w", transports.ImageName(ref), err)
}
defer srcSrc.Close()
manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
unparsedTop := image.UnparsedInstance(srcSrc, nil)
manifestBytes, manifestType, err := unparsedTop.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
}
@ -203,6 +204,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
imageDigest = manifestDigest.String()
}
var instanceDigest *digest.Digest
unparsedInstance := unparsedTop // for instanceDigest
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
@ -213,8 +215,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
unparsedInstance = image.UnparsedInstance(srcSrc, instanceDigest)
}
src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
src, err = image.FromUnparsedImage(ctx, systemContext, unparsedInstance)
if err != nil {
return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}

View File

@ -47,6 +47,7 @@ import (
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/regexp"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
@ -57,6 +58,10 @@ import (
"golang.org/x/term"
)
// maxHostnameLen is the maximum number of characters we allow in a generated
// hostname (presumably chosen to match the Linux HOST_NAME_MAX of 64 —
// TODO confirm).
const maxHostnameLen = 64

// validHostnames matches a run of hostname-legal characters: a leading
// letter or digit followed by one or more letters, digits, dots, or hyphens.
// Note the `+` means a run must be at least two characters long to match.
var validHostnames = regexp.Delayed("[A-Za-z0-9][A-Za-z0-9.-]+")
func (b *Builder) createResolvConf(rdir string, chownOpts *idtools.IDPair) (string, error) {
cfile := filepath.Join(rdir, "resolv.conf")
f, err := os.Create(cfile)
@ -2092,3 +2097,21 @@ func relabel(path, mountLabel string, shared bool) error {
}
return nil
}
// mapContainerNameToHostname returns the passed-in string with characters
// that don't match validHostnames (defined above) stripped out, truncated to
// at most maxHostnameLen characters.  It returns "" if the name contains no
// usable run of characters at all.
func mapContainerNameToHostname(containerName string) string {
	// Find the first run of hostname-legal characters.
	match := validHostnames.FindStringIndex(containerName)
	if match == nil {
		return ""
	}
	// Drop everything before the first legal run and rebase the match at 0.
	trimmed := containerName[match[0]:]
	match[1] -= match[0]
	match[0] = 0
	// Clamp the initial match as well: without this, a name whose legal
	// prefix is already longer than maxHostnameLen skips the loop below
	// (its second condition is false) and the unclamped match[1] lets us
	// return an over-long hostname.
	match[1] = min(match[1], maxHostnameLen)
	// Repeatedly delete the first illegal character and re-match, until the
	// legal prefix either covers the whole string or reaches the length cap.
	for match[1] != len(trimmed) && match[1] < match[0]+maxHostnameLen {
		trimmed = trimmed[:match[1]] + trimmed[match[1]+1:]
		match = validHostnames.FindStringIndex(trimmed)
		match[1] = min(match[1], maxHostnameLen)
	}
	return trimmed[:match[1]]
}

View File

@ -586,7 +586,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
} else if b.Hostname() != "" {
g.SetHostname(b.Hostname())
} else {
g.SetHostname(stringid.TruncateID(b.ContainerID))
hostname := stringid.TruncateID(b.ContainerID)
defConfig, err := config.Default()
if err != nil {
return false, "", fmt.Errorf("failed to get container config: %w", err)
}
if defConfig.Containers.ContainerNameAsHostName {
if mapped := mapContainerNameToHostname(b.Container); mapped != "" {
hostname = mapped
}
}
g.SetHostname(hostname)
}
} else {
g.SetHostname("")

View File

@ -991,7 +991,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
} else if b.Hostname() != "" {
g.SetHostname(b.Hostname())
} else {
g.SetHostname(stringid.TruncateID(b.ContainerID))
hostname := stringid.TruncateID(b.ContainerID)
defConfig, err := config.Default()
if err != nil {
return false, "", fmt.Errorf("failed to get container config: %w", err)
}
if defConfig.Containers.ContainerNameAsHostName {
if mapped := mapContainerNameToHostname(b.Container); mapped != "" {
hostname = mapped
}
}
g.SetHostname(hostname)
}
} else {
g.SetHostname("")
@ -1223,9 +1233,17 @@ func setupMaskedPaths(g *generate.Generator, opts *define.CommonBuildOptions) {
if slices.Contains(opts.Unmasks, "all") {
return
}
for _, mp := range append(config.DefaultMaskedPaths, opts.Masks...) {
if slices.Contains(opts.Unmasks, mp) {
continue
nextMaskedPath:
for _, mp := range append(config.DefaultMaskedPaths(), opts.Masks...) {
for _, unmask := range opts.Unmasks {
match, err := filepath.Match(unmask, mp)
if err != nil {
logrus.Warnf("Invalid unmask pattern %q: %v", unmask, err)
continue
}
if match {
continue nextMaskedPath
}
}
g.AddLinuxMaskedPaths(mp)
}

View File

@ -39,6 +39,13 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
network.NetworkInterface = name
}
}
// Validate interface name if specified
if network.NetworkInterface != "" {
if err := ValidateInterfaceName(network.NetworkInterface); err != nil {
return fmt.Errorf("network interface name %s invalid: %w", network.NetworkInterface, err)
}
}
return nil
}

View File

@ -4,6 +4,8 @@ import (
"errors"
"fmt"
"net"
"strings"
"unicode"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
@ -159,3 +161,23 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt
}
return nil
}
// ValidateInterfaceName checks that ifName is usable as a Linux network
// interface name:
//  1. it is at most MaxInterfaceNameLength characters long,
//  2. it is neither "." nor "..",
//  3. it contains no '/', ':', or whitespace characters.
//
// See https://github.com/torvalds/linux/blob/81e4f8d68c66da301bb881862735bd74c6241a19/include/uapi/linux/if.h#L33C18-L33C20
func ValidateInterfaceName(ifName string) error {
	if len(ifName) > types.MaxInterfaceNameLength {
		return fmt.Errorf("interface name is too long: interface names must be %d characters or less: %w", types.MaxInterfaceNameLength, types.ErrInvalidArg)
	}
	switch ifName {
	case ".", "..":
		return fmt.Errorf("interface name is . or ..: %w", types.ErrInvalidArg)
	}
	// Characters the kernel's dev_valid_name() rejects.
	forbidden := func(r rune) bool {
		return r == '/' || r == ':' || unicode.IsSpace(r)
	}
	if strings.ContainsFunc(ifName, forbidden) {
		return fmt.Errorf("interface name contains / or : or whitespace characters: %w", types.ErrInvalidArg)
	}
	return nil
}

View File

@ -30,4 +30,7 @@ var (
// NotHexRegex is a regular expression to check if a string is
// a hexadecimal string.
NotHexRegex = regexp.Delayed(`[^0-9a-fA-F]`)
	// MaxInterfaceNameLength is the maximum length of a network interface
	// name on Linux (IFNAMSIZ minus the trailing NUL).
	MaxInterfaceNameLength = 15
)

View File

@ -8,6 +8,7 @@ import (
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/containers/common/internal/attributedstring"
nettypes "github.com/containers/common/libnetwork/types"
@ -36,8 +37,8 @@ const (
defaultInitName = "catatonit"
)
var (
DefaultMaskedPaths = []string{
func getMaskedPaths() ([]string, error) {
maskedPaths := []string{
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
@ -49,8 +50,34 @@ var (
"/sys/devices/virtual/powercap",
"/sys/firmware",
"/sys/fs/selinux",
"/proc/interrupts",
}
maskedPathsToGlob := []string{
"/sys/devices/system/cpu/cpu*/thermal_throttle",
}
for _, p := range maskedPathsToGlob {
matches, err := filepath.Glob(p)
if err != nil {
return nil, err
}
maskedPaths = append(maskedPaths, matches...)
}
return maskedPaths, nil
}
// DefaultMaskedPaths returns the default list of paths masked inside
// containers.  The list is computed on first use and cached for all
// subsequent calls.
var DefaultMaskedPaths = sync.OnceValue(func() []string {
	paths, err := getMaskedPaths()
	if err != nil {
		// getMaskedPaths can only fail with ErrBadPattern, and the glob
		// patterns it uses are hard-coded and valid, so this is unreachable.
		panic(err)
	}
	return paths
})
var (
DefaultReadOnlyPaths = []string{
"/proc/asound",
"/proc/bus",