vendor: update buildah to latest main

Includes one breaking change for the flag, as BuildOutputs now accepts a
slice.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger
2025-06-05 11:47:46 +02:00
parent 96abeafc61
commit ac71bc6cf2
39 changed files with 824 additions and 234 deletions

View File

@ -523,7 +523,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *Buil
Annotations: flags.Annotation,
Args: args,
BlobDirectory: flags.BlobCache,
BuildOutput: flags.BuildOutput,
BuildOutputs: flags.BuildOutputs,
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheTTL: cacheTTL,

4
go.mod
View File

@ -12,7 +12,7 @@ require (
github.com/checkpoint-restore/checkpointctl v1.3.0
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.7.1
github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1
github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30
github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.8.6
@ -59,7 +59,7 @@ require (
github.com/opencontainers/runtime-spec v1.2.1
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2
github.com/opencontainers/selinux v1.12.0
github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8
github.com/openshift/imagebuilder v1.2.16
github.com/rootless-containers/rootlesskit/v2 v2.3.5
github.com/shirou/gopsutil/v4 v4.25.5
github.com/sirupsen/logrus v1.9.3

8
go.sum
View File

@ -64,8 +64,8 @@ github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEm
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs=
github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0=
github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1 h1:3bNWDmqh9tx0iAXPzBJugj/oC0nTD9yTXCyIu/Mj/LE=
github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1/go.mod h1:8BVLrM6nRl/dRMYxZ+TrmoWPXzkCY99rZOYvJoXpIyE=
github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30 h1:kCt0fnVBvXY9J98pUDeUc0gHKrhRwaBTWWD3otLutCE=
github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30/go.mod h1:QDecwvjrr+e0VD5GYv2dw7tsiqrz673r8B4rIYFP11Y=
github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c h1:j4epZCkQt8Jdpz2GsUzvqY4MfaOfJamrNpZnmbV84Ug=
github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c/go.mod h1:efNRNweihnq5nXALnAPDXTpC7uJtnFV4pNuETTfvI8s=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
@ -368,8 +368,8 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 h1:
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0=
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8 h1:iPRNMpzJ4HEtIXseOxdIkgNYlp7HJShtEk7URm1BUSU=
github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8/go.mod h1:gASl6jikVG3bCFnLjG6Ow5TeKwKVvrqUUj8C7EUmqc8=
github.com/openshift/imagebuilder v1.2.16 h1:Vqjy5uPoVDJiX5JUKHo0Cf440ih5cKI7lVe2ZJ2X+RA=
github.com/openshift/imagebuilder v1.2.16/go.mod h1:gASl6jikVG3bCFnLjG6Ow5TeKwKVvrqUUj8C7EUmqc8=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=

View File

@ -22,6 +22,8 @@ env:
IN_PODMAN: 'false'
# root or rootless
PRIV_NAME: root
# default "mention the $BUILDAH_RUNTIME in the task alias, with initial whitespace" value
RUNTIME_N: ""
####
#### Cache-image names to test with
@ -196,7 +198,7 @@ conformance_task:
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration
skip: *not_build_docs
depends_on: *smoke_vendor
@ -207,10 +209,26 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -220,10 +238,26 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -253,7 +287,7 @@ integration_task:
golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'
integration_rootless_task:
name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration rootless $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration_rootless
skip: *not_build_docs
depends_on: *smoke_vendor
@ -266,11 +300,29 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"

View File

@ -52,6 +52,9 @@ jobs:
- fedora-all-x86_64
- fedora-all-aarch64
enable_net: true
# Disable osh diff scan until Go support is available
# Ref: https://github.com/openscanhub/known-false-positives/pull/30#issuecomment-2858698495
osh_diff_scan_after_copr_build: false
# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build

View File

@ -173,5 +173,5 @@ Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
[MAINTAINERS](./MAINTAINERS.md) file.

View File

@ -1,3 +1,12 @@
## The Buildah Project Community Governance
The Buildah project, as part of Podman Container Tools, follows the [Podman Project Governance](https://github.com/containers/podman/blob/main/GOVERNANCE.md).
The Buildah project, as part of Podman Container Tools, follows the [Podman Project Governance](https://github.com/containers/podman/blob/main/GOVERNANCE.md)
except for the sections found in this document, which override those found in Podman's Governance.
---
# Maintainers File
The definitive source of truth for maintainers of this repository is the local [MAINTAINERS.md](./MAINTAINERS.md) file. The [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository is used for project-spanning roles, including Core Maintainer and Community Manager. Some repositories in the project will also have a local [OWNERS](./OWNERS) file, which the CI system uses to map users to roles. Any changes to the [OWNERS](./OWNERS) file must make a corresponding change to the [MAINTAINERS.md](./MAINTAINERS.md) file to ensure that the file remains up to date. Most changes to [MAINTAINERS.md](./MAINTAINERS.md) will require a change to the repository's [OWNERS](./OWNERS) file (e.g., adding a Reviewer), but some will not (e.g., promoting a Maintainer to a Core Maintainer, which comes with no additional CI-related privileges).
Any Core Maintainers listed in Podman's [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file should also be added to the list of “approvers” in the local [OWNERS](./OWNERS) file and as a Core Maintainer in the list of “Maintainers” in the local [MAINTAINERS.md](./MAINTAINERS.md) file.

View File

@ -1,6 +1,6 @@
# Buildah Maintainers
[GOVERNANCE.md](https://github.com/containers/podman/blob/main/GOVERNANCE.md)
[GOVERNANCE.md](GOVERNANCE.md)
describes the project's governance and the Project Roles used below.
## Maintainers
@ -16,9 +16,9 @@ describes the project's governance and the Project Roles used below.
| Neil Smith | [Neil-Smith](https://github.com/Neil-Smith) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Dan Walsh | [rhatdan](https://github.com/rhatdan) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Ashley Cui | [ashley-cui](https://github.com/ashley-cui) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Jan Rodák | [Honny1](https://github.com/Honny1) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Valentin Rothberg | [vrothberg](https://github.com/vrothberg) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
@ -29,3 +29,7 @@ None at present
## Credits
The structure of this document was based off of the equivalent one in the [CRI-O Project](https://github.com/cri-o/cri-o/blob/main/MAINTAINERS.md).
## Note
If there is a discrepancy between the [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository and this file regarding Core Maintainers or Community Managers, the file in the Podman Repository is considered the source of truth.

View File

@ -59,23 +59,7 @@ export GOLANGCI_LINT_VERSION := 2.1.0
# Note: Uses the -N -l go compiler options to disable compiler optimizations
# and inlining. Using these build options allows you to subsequently
# use source debugging tools like delve.
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial docs
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run \
-f channel:nixos-20.09 nix-prefetch-git \
-c nix-prefetch-git \
--no-deepClone \
https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
static:
@nix build -f nix/
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec docs
bin/buildah: $(SOURCES) internal/mkcw/embed/entrypoint_amd64.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
@ -107,6 +91,9 @@ bin/buildah.%: $(SOURCES)
mkdir -p ./bin
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
bin/dumpspec: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/dumpspec
bin/imgtype: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go

View File

@ -1,5 +1,6 @@
approvers:
- baude
- flouthoc
- giuseppe
- lsm5
- Luap99

View File

@ -4,6 +4,7 @@
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10579/badge)](https://www.bestpractices.dev/projects/10579)
The Buildah package provides a command line tool that can be used to

48
vendor/github.com/containers/buildah/ROADMAP.md generated vendored Normal file
View File

@ -0,0 +1,48 @@
![buildah logo (light)](logos/buildah-logo_large.png#gh-light-mode-only)
![buildah logo (dark)](logos/buildah-logo_reverse_large.png#gh-dark-mode-only)
# Buildah Roadmap
The Buildah development team reviews feature requests from its various stakeholders for consideration
quarterly along with the Podman Development team. These features are then prioritized and the top
features are then assigned to one or more engineers.
## Future feature considerations
The following features are of general importance to Buildah. While these features have no timeline
associated with them yet, they will likely be on future quarterly milestones.
* Ongoing work around partial pull support (zstd:chunked)
* Improved support for the BuildKit API.
* Performance and stability improvements.
* Reductions to the size of the Buildah binary.
## Milestones and commitments by quarter
This section is a historical account of what features were prioritized by quarter. Results of the prioritization will be added at start of each quarter (Jan, Apr, July, Oct).
### 2025 Q2 ####
#### Releases ####
- [ ] Buildah 1.40
#### Features ####
- [ ] Reduce binary size of Buildah
- [ ] Additional Containerfile command options
#### CNCF ####
- [ ] Add and adhere to Governance model
- [ ] Update Maintainers file
### 2025 Q1 ####
#### Releases ####
- [x] Buildah 1.39
#### Features ####
- [x] Artifact add --options
#### CNCF ####
- [x] Create Governance documentation
- [x] Create Maintainers file

View File

@ -95,8 +95,13 @@ type AddAndCopyOptions struct {
// RetryDelay is how long to wait before retrying attempts to retrieve
// remote contents.
RetryDelay time.Duration
// Parents preserve parent directories of source content
// Parents specifies that we should preserve either all of the parent
// directories of source locations, or the ones which follow "/./" in
// the source paths for source locations which include such a
// component.
Parents bool
// Timestamp is a timestamp to override on all content as it is being read.
Timestamp *time.Time
}
// gitURLFragmentSuffix matches fragments to use as Git reference and build
@ -123,7 +128,7 @@ func sourceIsRemote(source string) bool {
}
// getURL writes a tar archive containing the named content
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool) error {
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool, timestamp *time.Time) error {
url, err := url.Parse(src)
if err != nil {
return err
@ -154,15 +159,19 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
name = path.Base(url.Path)
}
// If there's a date on the content, use it. If not, use the Unix epoch
// for compatibility.
// or a specified value for compatibility.
date := time.Unix(0, 0).UTC()
lastModified := response.Header.Get("Last-Modified")
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
return fmt.Errorf("parsing last-modified time: %w", err)
if timestamp != nil {
date = timestamp.UTC()
} else {
lastModified := response.Header.Get("Last-Modified")
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
return fmt.Errorf("parsing last-modified time %q: %w", lastModified, err)
}
date = d.UTC()
}
date = d
}
// Figure out the size of the content.
size := response.ContentLength
@ -532,6 +541,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Timestamp: options.Timestamp,
}
writer := io.WriteCloser(pipeWriter)
repositoryDir := filepath.Join(cloneDir, subdir)
@ -540,7 +550,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
} else {
go func() {
getErr = retry.IfNecessary(context.TODO(), func() error {
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify)
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify, options.Timestamp)
}, &retry.Options{
MaxRetry: options.MaxRetries,
Delay: options.RetryDelay,
@ -696,6 +706,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Parents: options.Parents,
Timestamp: options.Timestamp,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
closeErr = writer.Close()

View File

@ -1,11 +0,0 @@
//go:build !linux && !(freebsd && cgo)
package chroot
import (
"errors"
)
// getPtyDescriptors is the stub implementation for platforms where opening a
// pseudoterminal pair is not implemented; it always returns -1 descriptors
// and a descriptive error.
func getPtyDescriptors() (int, int, error) {
return -1, -1, errors.New("getPtyDescriptors not supported on this platform")
}

View File

@ -18,6 +18,7 @@ import (
"syscall"
"github.com/containers/buildah/bind"
"github.com/containers/buildah/internal/pty"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"
@ -217,7 +218,7 @@ func runUsingChrootMain() {
var stderr io.Writer
fdDesc := make(map[int]string)
if options.Spec.Process.Terminal {
ptyMasterFd, ptyFd, err := getPtyDescriptors()
ptyMasterFd, ptyFd, err := pty.GetPtyDescriptors()
if err != nil {
logrus.Errorf("error opening PTY descriptors: %v", err)
os.Exit(1)

View File

@ -58,9 +58,20 @@ type CommitOptions struct {
// ReportWriter is an io.Writer which will be used to log the writing
// of the new image.
ReportWriter io.Writer
// HistoryTimestamp is the timestamp used when creating new items in the
// image's history. If unset, the current time will be used.
// HistoryTimestamp specifies a timestamp to use for the image's
// created-on date, the corresponding field in new history entries, and
// the timestamps to set on contents in new layer diffs. If left
// unset, the current time is used for the configuration and manifest,
// and timestamps of layer contents are used as-is.
HistoryTimestamp *time.Time
// SourceDateEpoch specifies a timestamp to use for the image's
// created-on date and the corresponding field in new history entries.
// If left unset, the current time is used for the configuration and
// manifest.
SourceDateEpoch *time.Time
// RewriteTimestamp, if set, forces timestamps in generated layers to
// not be later than the SourceDateEpoch, if it is set.
RewriteTimestamp bool
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
@ -274,8 +285,9 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
// if commit was successful and the image destination was local.
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
var (
imgID string
src types.ImageReference
imgID string
src types.ImageReference
destinationTimestamp *time.Time
)
// If we weren't given a name, build a destination reference using a
@ -293,6 +305,10 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
timestamp := time.Unix(0, 0).UTC()
options.HistoryTimestamp = &timestamp
}
destinationTimestamp = options.HistoryTimestamp
if options.SourceDateEpoch != nil {
destinationTimestamp = options.SourceDateEpoch
}
nameToRemove := ""
if dest == nil {
nameToRemove = stringid.GenerateRandomID() + "-tmp"
@ -415,7 +431,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil, options.HistoryTimestamp), options.MaxRetries, options.RetryDelay); err != nil {
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil, destinationTimestamp), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", fmt.Errorf("copying layers and metadata for container %q: %w", b.ContainerID, err)
}
// If we've got more names to attach, and we know how to do that for

View File

@ -146,18 +146,9 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
if b.Architecture() == "" {
if sys != nil && sys.ArchitectureChoice != "" {
b.SetArchitecture(sys.ArchitectureChoice)
} else {
b.SetArchitecture(currentPlatformSpecification.Architecture)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
b.SetArchitecture(ps.Architecture)
b.SetVariant(ps.Variant)
}
if b.Variant() == "" {
if sys != nil && sys.VariantChoice != "" {
b.SetVariant(sys.VariantChoice)
} else {
b.SetArchitecture(currentPlatformSpecification.Architecture)
b.SetVariant(currentPlatformSpecification.Variant)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it

View File

@ -14,6 +14,7 @@ import (
"path"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"sync"
@ -48,6 +49,7 @@ func init() {
// "**" component in the pattern, filepath.Glob() will be called with the "**"
// replaced with all of the subdirectories under that point, and the results
// will be concatenated.
// The matched paths are returned in lexical order, which makes the output deterministic.
func extendedGlob(pattern string) (matches []string, err error) {
subdirs := func(dir string) []string {
var subdirectories []string
@ -113,6 +115,7 @@ func extendedGlob(pattern string) (matches []string, err error) {
}
matches = append(matches, theseMatches...)
}
sort.Strings(matches)
return matches, nil
}
@ -138,30 +141,34 @@ func isArchivePath(path string) bool {
type requestType string
const (
requestEval requestType = "EVAL"
requestStat requestType = "STAT"
requestGet requestType = "GET"
requestPut requestType = "PUT"
requestMkdir requestType = "MKDIR"
requestRemove requestType = "REMOVE"
requestQuit requestType = "QUIT"
requestEval requestType = "EVAL"
requestStat requestType = "STAT"
requestGet requestType = "GET"
requestPut requestType = "PUT"
requestMkdir requestType = "MKDIR"
requestRemove requestType = "REMOVE"
requestQuit requestType = "QUIT"
requestEnsure requestType = "ENSURE"
requestConditionalRemove requestType = "CONDRM"
)
// Request encodes a single request.
type request struct {
Request requestType
Root string // used by all requests
preservedRoot string
rootPrefix string // used to reconstruct paths being handed back to the caller
Directory string // used by all requests
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
StatOptions StatOptions `json:",omitempty"`
GetOptions GetOptions `json:",omitempty"`
PutOptions PutOptions `json:",omitempty"`
MkdirOptions MkdirOptions `json:",omitempty"`
RemoveOptions RemoveOptions `json:",omitempty"`
Request requestType
Root string // used by all requests
preservedRoot string
rootPrefix string // used to reconstruct paths being handed back to the caller
Directory string // used by all requests
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
StatOptions StatOptions `json:",omitempty"`
GetOptions GetOptions `json:",omitempty"`
PutOptions PutOptions `json:",omitempty"`
MkdirOptions MkdirOptions `json:",omitempty"`
RemoveOptions RemoveOptions `json:",omitempty"`
EnsureOptions EnsureOptions `json:",omitempty"`
ConditionalRemoveOptions ConditionalRemoveOptions `json:",omitempty"`
}
func (req *request) Excludes() []string {
@ -180,6 +187,10 @@ func (req *request) Excludes() []string {
return nil
case requestQuit:
return nil
case requestEnsure:
return nil
case requestConditionalRemove:
return nil
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -201,6 +212,10 @@ func (req *request) UIDMap() []idtools.IDMap {
return nil
case requestQuit:
return nil
case requestEnsure:
return req.EnsureOptions.UIDMap
case requestConditionalRemove:
return req.ConditionalRemoveOptions.UIDMap
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -222,6 +237,10 @@ func (req *request) GIDMap() []idtools.IDMap {
return nil
case requestQuit:
return nil
case requestEnsure:
return req.EnsureOptions.GIDMap
case requestConditionalRemove:
return req.ConditionalRemoveOptions.GIDMap
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -229,13 +248,15 @@ func (req *request) GIDMap() []idtools.IDMap {
// Response encodes a single response.
type response struct {
Error string `json:",omitempty"`
Stat statResponse `json:",omitempty"`
Eval evalResponse `json:",omitempty"`
Get getResponse `json:",omitempty"`
Put putResponse `json:",omitempty"`
Mkdir mkdirResponse `json:",omitempty"`
Remove removeResponse `json:",omitempty"`
Error string `json:",omitempty"`
Stat statResponse `json:",omitempty"`
Eval evalResponse `json:",omitempty"`
Get getResponse `json:",omitempty"`
Put putResponse `json:",omitempty"`
Mkdir mkdirResponse `json:",omitempty"`
Remove removeResponse `json:",omitempty"`
Ensure ensureResponse `json:",omitempty"`
ConditionalRemove conditionalRemoveResponse `json:",omitempty"`
}
// statResponse encodes a response for a single Stat request.
@ -282,6 +303,16 @@ type mkdirResponse struct{}
// removeResponse encodes a response for a single Remove request.
type removeResponse struct{}
// ensureResponse encodes a response to an Ensure request.
type ensureResponse struct {
Created []string // paths that were created because they weren't already present
}
// conditionalRemoveResponse encodes a response to a conditionalRemove request.
type conditionalRemoveResponse struct {
Removed []string // paths that were removed
}
// EvalOptions controls parts of Eval()'s behavior.
type EvalOptions struct{}
@ -363,6 +394,7 @@ type GetOptions struct {
NoDerefSymlinks bool // don't follow symlinks when globs match them
IgnoreUnreadable bool // ignore errors reading items, instead of returning an error
NoCrossDevice bool // if a subdirectory is a mountpoint with a different device number, include it but skip its contents
Timestamp *time.Time // timestamp to force on all contents
}
// Get produces an archive containing items that match the specified glob
@ -952,6 +984,12 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
case requestRemove:
resp := copierHandlerRemove(req)
return resp, nil, nil
case requestEnsure:
resp := copierHandlerEnsure(req, idMappings)
return resp, nil, nil
case requestConditionalRemove:
resp := copierHandlerConditionalRemove(req, idMappings)
return resp, nil, nil
case requestQuit:
return nil, nil, nil
}
@ -1599,6 +1637,16 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.Rename != nil {
hdr.Name = handleRename(options.Rename, hdr.Name)
}
if options.Timestamp != nil {
timestamp := options.Timestamp.UTC()
hdr.ModTime = timestamp
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = timestamp
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = timestamp
}
}
if err = tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("writing tar header from %q to pipe: %w", contentPath, err)
}
@ -1677,6 +1725,16 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
}
defer f.Close()
}
if options.Timestamp != nil {
timestamp := options.Timestamp.UTC()
hdr.ModTime = timestamp
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = timestamp
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = timestamp
}
}
// output the header
if err = tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("writing header for %s (%s): %w", contentPath, hdr.Name, err)
@ -2181,3 +2239,257 @@ func copierHandlerRemove(req request) *response {
}
return &response{Error: "", Remove: removeResponse{}}
}
// EnsurePath is a single item being passed to an Ensure() call.
type EnsurePath struct {
Path string // a pathname, relative to the Directory, possibly relative to the root
Typeflag byte // can be either TypeReg or TypeDir, everything else is currently ignored
ModTime *time.Time // mtime to set on newly-created items, default is to leave them be
Chmod *os.FileMode // mode, defaults to 000 for files and 700 for directories
Chown *idtools.IDPair // owner settings to set on newly-created items, defaults to 0:0
}
// EnsureOptions controls parts of Ensure()'s behavior.
type EnsureOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
Paths []EnsurePath // items to create under the root if they are not already present
}
// Ensure ensures that the specified mount point targets exist under the root.
// If the root directory is not specified, the current root directory is used.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the operation is performed in a chrooted
// context.
// It returns the paths, relative to the root, of items which had to be
// created because they were not already present.
func Ensure(root, directory string, options EnsureOptions) ([]string, error) {
req := request{
Request: requestEnsure,
Root: root,
Directory: directory,
EnsureOptions: options,
}
// Hand the request to the copier helper, which performs the work, possibly
// in a chrooted subprocess.
resp, err := copier(nil, nil, req)
if err != nil {
return nil, err
}
// A non-empty Error field indicates a failure reported by the handler.
if resp.Error != "" {
return nil, errors.New(resp.Error)
}
return resp.Ensure.Created, nil
}
// copierHandlerEnsure handles a request to create a set of regular files or
// directories under the request's root+directory, along with any missing
// parent directories, applying the requested ownership, permissions, and
// timestamps to items that it creates.  It returns the list of items that it
// created, as paths relative to the root.  The modification times of
// directories which gain new children are restored to their previous values.
func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response {
	errorResponse := func(fmtspec string, args ...any) *response {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Ensure: ensureResponse{}}
	}
	// Sort lexically so that parent directories are always handled before
	// their contents.
	slices.SortFunc(req.EnsureOptions.Paths, func(a, b EnsurePath) int { return strings.Compare(a.Path, b.Path) })
	var created []string
	for _, item := range req.EnsureOptions.Paths {
		uid, gid := 0, 0
		if item.Chown != nil {
			// fixed: previously read item.Chown.UID into both uid and gid
			uid, gid = item.Chown.UID, item.Chown.GID
		}
		var mode os.FileMode
		switch item.Typeflag {
		case tar.TypeReg:
			mode = 0o000
		case tar.TypeDir:
			mode = 0o700
		default:
			// anything other than a regular file or a directory is
			// currently ignored
			continue
		}
		if item.Chmod != nil {
			mode = *item.Chmod
		}
		if idMappings != nil && !idMappings.Empty() {
			// Map the requested in-container owner into the host's ID space.
			containerDirPair := idtools.IDPair{UID: uid, GID: gid}
			hostDirPair, err := idMappings.ToHost(containerDirPair)
			if err != nil {
				return errorResponse("copier: ensure: error mapping container filesystem owner %d:%d to host filesystem owners: %v", uid, gid, err)
			}
			uid, gid = hostDirPair.UID, hostDirPair.GID
		}
		directory, err := resolvePath(req.Root, req.Directory, true, nil)
		if err != nil {
			return errorResponse("copier: ensure: error resolving %q: %v", req.Directory, err)
		}
		rel, err := convertToRelSubdirectory(req.Root, directory)
		if err != nil {
			return errorResponse("copier: ensure: error computing path of %q relative to %q: %v", directory, req.Root, err)
		}
		subdir := ""
		components := strings.Split(filepath.Join(rel, item.Path), string(os.PathSeparator))
		components = slices.DeleteFunc(components, func(s string) bool { return s == "" || s == "." })
		for i, component := range components {
			parentPath := subdir
			if parentPath == "" {
				parentPath = "."
			}
			leaf := filepath.Join(subdir, component)
			// Note the parent's timestamps so that we can put them back
			// after creating something inside it changes them.
			parentInfo, err := os.Stat(filepath.Join(req.Root, parentPath))
			if err != nil {
				return errorResponse("copier: ensure: checking datestamps on %q (%d: %v): %v", parentPath, i, components, err)
			}
			if i < len(components)-1 || item.Typeflag == tar.TypeDir {
				err = os.Mkdir(filepath.Join(req.Root, leaf), mode)
				subdir = leaf
			} else if item.Typeflag == tar.TypeReg {
				var f *os.File
				if f, err = os.OpenFile(filepath.Join(req.Root, leaf), os.O_CREATE|os.O_EXCL|os.O_RDWR, mode); err == nil {
					f.Close()
				}
			} else {
				continue
			}
			if err == nil {
				createdLeaf := leaf
				if len(createdLeaf) > 1 {
					createdLeaf = strings.TrimPrefix(createdLeaf, string(os.PathSeparator))
				}
				created = append(created, createdLeaf)
				// fixed: previously passed uid as the group argument
				if err = chown(filepath.Join(req.Root, leaf), uid, gid); err != nil {
					return errorResponse("copier: ensure: error setting owner of %q to %d:%d: %v", leaf, uid, gid, err)
				}
				// fixed: the err argument for the trailing %v verb was missing
				if err = chmod(filepath.Join(req.Root, leaf), mode); err != nil {
					return errorResponse("copier: ensure: error setting permissions on %q to 0%o: %v", leaf, mode, err)
				}
				if item.ModTime != nil {
					if err := os.Chtimes(filepath.Join(req.Root, leaf), *item.ModTime, *item.ModTime); err != nil {
						return errorResponse("copier: ensure: resetting datestamp on %q: %v", leaf, err)
					}
				}
			} else {
				// FreeBSD can return EISDIR for "mkdir /":
				// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
				if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) {
					return errorResponse("copier: ensure: error checking item %q: %v", leaf, err)
				}
			}
			// Restore the parent directory's previous timestamps.
			if err := os.Chtimes(filepath.Join(req.Root, parentPath), parentInfo.ModTime(), parentInfo.ModTime()); err != nil {
				return errorResponse("copier: ensure: resetting datestamp on %q: %v", parentPath, err)
			}
		}
	}
	slices.Sort(created)
	return &response{Error: "", Ensure: ensureResponse{Created: created}}
}
// ConditionalRemovePath is a single item being passed to a ConditionalRemove() call.
type ConditionalRemovePath struct {
	Path    string          // a pathname, relative to the Directory, possibly relative to the root
	ModTime *time.Time      // mtime to expect this item to have, if it's a condition
	Mode    *os.FileMode    // mode to expect this item to have, if it's a condition
	Owner   *idtools.IDPair // owner to expect this item to have, if it's a condition
}
// ConditionalRemoveOptions controls parts of ConditionalRemove()'s behavior.
type ConditionalRemoveOptions struct {
	UIDMap, GIDMap []idtools.IDMap         // map from hostIDs to containerIDs in the chroot
	Paths          []ConditionalRemovePath // items to remove if they match their respective conditions
}
// ConditionalRemove removes the set of named items if they're present and
// currently match the additional conditions, returning the list of items it
// removed.  Directories will also only be removed if they have no contents,
// and will be left in place otherwise.
func ConditionalRemove(root, directory string, options ConditionalRemoveOptions) ([]string, error) {
	resp, err := copier(nil, nil, request{
		Request:                  requestConditionalRemove,
		Root:                     root,
		Directory:                directory,
		ConditionalRemoveOptions: options,
	})
	if err != nil {
		return nil, err
	}
	if resp.Error != "" {
		return nil, errors.New(resp.Error)
	}
	return resp.ConditionalRemove.Removed, nil
}
// copierHandlerConditionalRemove handles a request to remove items under the
// request's root+directory, but only if they still match the conditions
// (mtime, permission bits, ownership) attached to each item; items which
// appear to have been modified since we created them are quietly left in
// place, as are non-empty directories.  The returned list includes both items
// that were removed and items that were already missing.
func copierHandlerConditionalRemove(req request, idMappings *idtools.IDMappings) *response {
	errorResponse := func(fmtspec string, args ...any) *response {
		return &response{Error: fmt.Sprintf(fmtspec, args...), ConditionalRemove: conditionalRemoveResponse{}}
	}
	// Sort in reverse lexical order so that we consider the contents of a
	// directory before the directory itself.
	slices.SortFunc(req.ConditionalRemoveOptions.Paths, func(a, b ConditionalRemovePath) int { return strings.Compare(b.Path, a.Path) })
	var removed []string
	for _, item := range req.ConditionalRemoveOptions.Paths {
		uid, gid := 0, 0
		if item.Owner != nil {
			uid, gid = item.Owner.UID, item.Owner.GID
		}
		if idMappings != nil && !idMappings.Empty() {
			// Map the expected in-container owner into the host's ID
			// space for comparison against what's actually on disk.
			containerDirPair := idtools.IDPair{UID: uid, GID: gid}
			hostDirPair, err := idMappings.ToHost(containerDirPair)
			if err != nil {
				return errorResponse("copier: conditionalRemove: error mapping container filesystem owner %d:%d to host filesystem owners: %v", uid, gid, err)
			}
			uid, gid = hostDirPair.UID, hostDirPair.GID
		}
		directory, err := resolvePath(req.Root, req.Directory, true, nil)
		if err != nil {
			return errorResponse("copier: conditionalRemove: error resolving %q: %v", req.Directory, err)
		}
		rel, err := convertToRelSubdirectory(req.Root, directory)
		if err != nil {
			return errorResponse("copier: conditionalRemove: error computing path of %q relative to %q: %v", directory, req.Root, err)
		}
		components := strings.Split(filepath.Join(rel, item.Path), string(os.PathSeparator))
		components = slices.DeleteFunc(components, func(s string) bool { return s == "" || s == "." })
		if len(components) == 0 {
			// never try to remove the root itself
			continue
		}
		itemPath := filepath.Join(append([]string{req.Root}, components...)...)
		itemInfo, err := os.Lstat(itemPath)
		if err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return errorResponse("copier: conditionalRemove: checking on candidate %q: %v", itemPath, err)
			}
			// okay? already gone, so count it as removed
			removed = append(removed, item.Path)
			continue
		}
		// Note the parent's timestamps so that we can restore them after
		// removing an entry changes them.
		parentPath := filepath.Dir(itemPath)
		parentInfo, err := os.Stat(parentPath)
		if err != nil {
			return errorResponse("copier: conditionalRemove: checking on parent directory %q: %v", parentPath, err)
		}
		if item.Mode != nil && itemInfo.Mode().Perm()&fs.ModePerm != *item.Mode&fs.ModePerm {
			// mismatch, modified? ignore
			continue
		}
		if item.ModTime != nil && !item.ModTime.Equal(itemInfo.ModTime()) {
			// mismatch, modified? ignore
			continue
		}
		if item.Owner != nil {
			ownerUID, ownerGID, err := owner(itemInfo)
			if err != nil {
				return errorResponse("copier: conditionalRemove: checking ownership of %q: %v", itemPath, err)
			}
			if uid != ownerUID || gid != ownerGID {
				// mismatch, modified? ignore
				continue
			}
		}
		if err := os.Remove(itemPath); err != nil && !errors.Is(err, os.ErrNotExist) {
			// EEXIST/ENOTEMPTY mean a non-empty directory, which we
			// deliberately leave in place.
			if !errors.Is(err, syscall.EEXIST) && !errors.Is(err, syscall.ENOTEMPTY) {
				return errorResponse("copier: conditionalRemove: removing %q: %v", itemPath, err)
			}
			// okay? not removed, but it wasn't empty, so okay?
			continue
		}
		removed = append(removed, item.Path)
		if err := os.Chtimes(parentPath, parentInfo.ModTime(), parentInfo.ModTime()); err != nil {
			return errorResponse("copier: conditionalRemove: resetting datestamp on %q: %v", parentPath, err)
		}
	}
	slices.Sort(removed)
	return &response{Error: "", ConditionalRemove: conditionalRemoveResponse{Removed: removed}}
}

View File

@ -70,6 +70,13 @@ func lutimes(_ bool, path string, atime, mtime time.Time) error {
return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
}
func owner(info os.FileInfo) (int, int, error) {
if st, ok := info.Sys().(*syscall.Stat_t); ok {
return int(st.Uid), int(st.Gid), nil
}
return -1, -1, syscall.ENOSYS
}
// sameDevice returns true unless we're sure that they're not on the same device
func sameDevice(a, b os.FileInfo) bool {
aSys := a.Sys()

View File

@ -77,6 +77,10 @@ func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
}
// owner would return the UID and GID of the item described by info, but that
// information isn't available on this platform, so it always returns ENOSYS.
func owner(info os.FileInfo) (int, int, error) {
	return -1, -1, syscall.ENOSYS
}
// sameDevice returns true since we can't be sure that they're not on the same device
func sameDevice(a, b os.FileInfo) bool {
return true

View File

@ -167,9 +167,12 @@ type BuildOptions struct {
AdditionalBuildContexts map[string]*AdditionalBuildContext
// Name of the image to write to.
Output string
// BuildOutput specifies if any custom build output is selected for following build.
// It allows end user to export recently built rootfs into a directory or tar.
// See the documentation of 'buildah build --output' for the details of the format.
// BuildOutputs specifies if any custom build output is selected for
// following build. It allows the end user to export the image's
// rootfs to a directory or a tar archive. See the documentation of
// 'buildah build --output' for the details of the syntax.
BuildOutputs []string
// Deprecated: use BuildOutputs instead.
BuildOutput string
// ConfidentialWorkload controls whether or not, and if so, how, we produce an
// image that's meant to be run using krun as a VM instead of a conventional

View File

@ -133,7 +133,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
}
hdr, err = tarReader.Next()
}
if err != io.EOF {
if !errors.Is(err, io.EOF) {
filterer.err = fmt.Errorf("reading tar archive: %w", err)
break
}
@ -146,7 +146,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
if err == nil {
err = err1
}
pipeReader.CloseWithError(err)
if err != nil {
pipeReader.CloseWithError(err)
} else {
pipeReader.Close()
}
filterer.wg.Done()
}()
return filterer

View File

@ -71,6 +71,8 @@ type containerImageRef struct {
dconfig []byte
created *time.Time
createdBy string
layerModTime *time.Time
layerLatestModTime *time.Time
historyComment string
annotations map[string]string
preferredManifestType string
@ -232,7 +234,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
// as long as we DON'T Close() the tar Writer.
filename, _, _, err := i.makeExtraImageContentDiff(false)
if err != nil {
errChan <- err
errChan <- fmt.Errorf("creating part of archive with extra content: %w", err)
return
}
file, err := os.Open(filename)
@ -242,7 +244,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
}
defer file.Close()
if _, err = io.Copy(pipeWriter, file); err != nil {
errChan <- err
errChan <- fmt.Errorf("writing contents of %q: %w", filename, err)
return
}
}
@ -960,28 +962,9 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// Use specified timestamps in the layer, if we're doing that for history
// entries.
if i.created != nil {
// Tweak the contents of layers we're creating.
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
// Changing a zeroed field to a non-zero field can affect the
// format that the library uses for writing the header, so only
// change fields that are already set to avoid changing the
// format (and as a result, changing the length) of the header
// that we write.
if !hdr.ModTime.IsZero() {
hdr.ModTime = *i.created
}
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = *i.created
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = *i.created
}
return false, false, nil
})
writer = writeCloser
}
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime)
writer = writeCloser
// Okay, copy from the raw diff through the filter, compressor, and counter and
// digesters.
size, err := io.Copy(writer, rc)
@ -1212,7 +1195,7 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str
return err
}
if _, err := io.Copy(tw, content); err != nil {
return err
return fmt.Errorf("writing content for %q: %w", path, err)
}
if err := tw.Flush(); err != nil {
return err
@ -1229,9 +1212,47 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str
return diff.Name(), digester.Digest(), counter.Count, nil
}
// makeFilteredLayerWriteCloser returns either the passed-in WriteCloser, or if
// layerModTime or layerLatestModTime are set, a WriteCloser which modifies
// the tarball that's written to it so that timestamps in headers are set to
// layerModTime exactly (if a value is provided for it), and then clamped to be
// no later than layerLatestModTime (if a value is provided for it).
// This implies that if both values are provided, the archive's timestamps will
// be set to the earlier of the two values.
func makeFilteredLayerWriteCloser(wc io.WriteCloser, layerModTime, layerLatestModTime *time.Time) io.WriteCloser {
	if layerModTime == nil && layerLatestModTime == nil {
		// No rewriting requested: pass the archive through untouched.
		return wc
	}
	wc = newTarFilterer(wc, func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
		// Changing a zeroed field to a non-zero field can affect the
		// format that the library uses for writing the header, so only
		// change fields that are already set to avoid changing the
		// format (and as a result, changing the length) of the header
		// that we write.
		modTime := hdr.ModTime
		if layerModTime != nil {
			// force the timestamp, then possibly clamp it below
			modTime = *layerModTime
		}
		if layerLatestModTime != nil && layerLatestModTime.Before(modTime) {
			modTime = *layerLatestModTime
		}
		if !hdr.ModTime.IsZero() {
			hdr.ModTime = modTime
		}
		if !hdr.AccessTime.IsZero() {
			hdr.AccessTime = modTime
		}
		if !hdr.ChangeTime.IsZero() {
			hdr.ChangeTime = modTime
		}
		return false, false, nil
	})
	return wc
}
// makeLinkedLayerInfos calculates the size and digest information for a layer
// we intend to add to the image that we're committing.
func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string) ([]commitLinkedLayerInfo, error) {
func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string, layerModTime, layerLatestModTime *time.Time) ([]commitLinkedLayerInfo, error) {
if layers == nil {
return nil, nil
}
@ -1255,48 +1276,50 @@ func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string) (
linkedLayer: layer,
}
if err = func() error {
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return fmt.Errorf("determining directory for working container: %w", err)
}
f, err := os.CreateTemp(cdir, "")
if err != nil {
return fmt.Errorf("creating temporary file to hold blob for %q: %w", info.linkedLayer.BlobPath, err)
}
defer f.Close()
var rc io.ReadCloser
var what string
if st.IsDir() {
// if it's a directory, archive it and digest the archive while we're storing a copy somewhere
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return fmt.Errorf("determining directory for working container: %w", err)
}
f, err := os.CreateTemp(cdir, "")
if err != nil {
return fmt.Errorf("creating temporary file to hold blob for %q: %w", info.linkedLayer.BlobPath, err)
}
defer f.Close()
rc, err := chrootarchive.Tar(info.linkedLayer.BlobPath, nil, info.linkedLayer.BlobPath)
what = "directory"
rc, err = chrootarchive.Tar(info.linkedLayer.BlobPath, nil, info.linkedLayer.BlobPath)
if err != nil {
return fmt.Errorf("generating a layer blob from %q: %w", info.linkedLayer.BlobPath, err)
}
digester := digest.Canonical.Digester()
sizeCounter := ioutils.NewWriteCounter(digester.Hash())
_, copyErr := io.Copy(f, io.TeeReader(rc, sizeCounter))
if err := rc.Close(); err != nil {
return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, err)
}
if copyErr != nil {
return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, copyErr)
}
info.uncompressedDigest = digester.Digest()
info.size = sizeCounter.Count
info.linkedLayer.BlobPath = f.Name()
} else {
// if it's not a directory, just digest it
f, err := os.Open(info.linkedLayer.BlobPath)
what = "file"
// if it's not a directory, just digest it while we're storing a copy somewhere
rc, err = os.Open(info.linkedLayer.BlobPath)
if err != nil {
return err
}
defer f.Close()
sizeCounter := ioutils.NewWriteCounter(io.Discard)
uncompressedDigest, err := digest.Canonical.FromReader(io.TeeReader(f, sizeCounter))
if err != nil {
return err
}
info.uncompressedDigest = uncompressedDigest
info.size = sizeCounter.Count
}
digester := digest.Canonical.Digester()
sizeCountedFile := ioutils.NewWriteCounter(io.MultiWriter(digester.Hash(), f))
wc := makeFilteredLayerWriteCloser(ioutils.NopWriteCloser(sizeCountedFile), layerModTime, layerLatestModTime)
_, copyErr := io.Copy(wc, rc)
wcErr := wc.Close()
if err := rc.Close(); err != nil {
return fmt.Errorf("storing a copy of %s %q: closing reader: %w", what, info.linkedLayer.BlobPath, err)
}
if copyErr != nil {
return fmt.Errorf("storing a copy of %s %q: copying data: %w", what, info.linkedLayer.BlobPath, copyErr)
}
if wcErr != nil {
return fmt.Errorf("storing a copy of %s %q: closing writer: %w", what, info.linkedLayer.BlobPath, wcErr)
}
info.uncompressedDigest = digester.Digest()
info.size = sizeCountedFile.Count
info.linkedLayer.BlobPath = f.Name()
return nil
}(); err != nil {
return nil, err
@ -1341,10 +1364,18 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
if err != nil {
return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
}
var created *time.Time
var created, layerModTime, layerLatestModTime *time.Time
if options.HistoryTimestamp != nil {
historyTimestampUTC := options.HistoryTimestamp.UTC()
created = &historyTimestampUTC
layerModTime = &historyTimestampUTC
}
if options.SourceDateEpoch != nil {
sourceDateEpochUTC := options.SourceDateEpoch.UTC()
created = &sourceDateEpochUTC
if options.RewriteTimestamp {
layerLatestModTime = &sourceDateEpochUTC
}
}
createdBy := b.CreatedBy()
if createdBy == "" {
@ -1372,11 +1403,11 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
}
}
preLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(b.PrependedLinkedLayers), slices.Clone(options.PrependedLinkedLayers)...), "prepended layer")
preLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(b.PrependedLinkedLayers), slices.Clone(options.PrependedLinkedLayers)...), "prepended layer", layerModTime, layerLatestModTime)
if err != nil {
return nil, err
}
postLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(options.AppendedLinkedLayers), slices.Clone(b.AppendedLinkedLayers)...), "appended layer")
postLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(options.AppendedLinkedLayers), slices.Clone(b.AppendedLinkedLayers)...), "appended layer", layerModTime, layerLatestModTime)
if err != nil {
return nil, err
}
@ -1395,6 +1426,8 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
dconfig: dconfig,
created: created,
createdBy: createdBy,
layerModTime: layerModTime,
layerLatestModTime: layerLatestModTime,
historyComment: b.HistoryComment(),
annotations: b.Annotations(),
preferredManifestType: manifestType,

View File

@ -151,9 +151,9 @@ type Executor struct {
logPrefix string
unsetEnvs []string
unsetLabels []string
processLabel string // Shares processLabel of first stage container with containers of other stages in same build
mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
buildOutput string // Specifies instructions for any custom build output
processLabel string // Shares processLabel of first stage container with containers of other stages in same build
mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
buildOutputs []string // Specifies instructions for any custom build output
osVersion string
osFeatures []string
envs []string
@ -229,6 +229,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
}
}
buildOutputs := slices.Clone(options.BuildOutputs)
if options.BuildOutput != "" { //nolint:staticcheck
buildOutputs = append(buildOutputs, options.BuildOutput) //nolint:staticcheck
}
exec := Executor{
args: options.Args,
cacheFrom: options.CacheFrom,
@ -314,7 +319,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
logPrefix: logPrefix,
unsetEnvs: slices.Clone(options.UnsetEnvs),
unsetLabels: slices.Clone(options.UnsetLabels),
buildOutput: options.BuildOutput,
buildOutputs: buildOutputs,
osVersion: options.OSVersion,
osFeatures: slices.Clone(options.OSFeatures),
envs: slices.Clone(options.Envs),

View File

@ -418,7 +418,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
data = strings.TrimPrefix(data, "\n")
// add breakline when heredoc ends for docker compat
data = data + "\n"
// Create seperate subdir for this file.
// Create separate subdir for this file.
tmpDir, err := os.MkdirTemp(parse.GetTempDir(), "buildah-heredoc")
if err != nil {
return fmt.Errorf("unable to create tmp dir for heredoc run %q: %w", parse.GetTempDir(), err)
@ -1270,13 +1270,15 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
// Parse and populate buildOutputOption if needed
var buildOutputOption define.BuildOutputOption
canGenerateBuildOutput := (s.executor.buildOutput != "" && lastStage)
if canGenerateBuildOutput {
logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
if err != nil {
return "", nil, false, fmt.Errorf("failed to parse build output: %w", err)
var buildOutputOptions []define.BuildOutputOption
if lastStage && len(s.executor.buildOutputs) > 0 {
for _, buildOutput := range s.executor.buildOutputs {
logrus.Debugf("generating custom build output with options %q", buildOutput)
buildOutputOption, err := parse.GetBuildOutput(buildOutput)
if err != nil {
return "", nil, false, fmt.Errorf("failed to parse build output %q: %w", buildOutput, err)
}
buildOutputOptions = append(buildOutputOptions, buildOutputOption)
}
}
@ -1311,7 +1313,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
// Generate build output from the new image, or the preexisting
// one if we didn't actually do anything, if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, onlyBaseImage, err
}
@ -1466,7 +1468,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
logImageID(imgID)
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1697,7 +1699,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1737,7 +1739,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1752,7 +1754,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// then generate output manually since there is no opportunity
// for us to perform `commit` anywhere in the code.
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}

View File

@ -1,6 +1,6 @@
//go:build freebsd && cgo
package chroot
package pty
// #include <fcntl.h>
// #include <stdlib.h>
@ -37,7 +37,9 @@ func unlockpt(fd int) error {
return nil
}
func getPtyDescriptors() (int, int, error) {
// GetPtyDescriptors allocates a new pseudoterminal and returns the control and
// pseudoterminal file descriptors.
func GetPtyDescriptors() (int, int, error) {
// Create a pseudo-terminal and open the control side
controlFd, err := openpt()
if err != nil {

View File

@ -1,6 +1,6 @@
//go:build linux
package chroot
package pty
import (
"fmt"
@ -11,9 +11,11 @@ import (
"golang.org/x/sys/unix"
)
// Open a PTY using the /dev/ptmx device. The main advantage of using
// this instead of posix_openpt is that it avoids cgo.
func getPtyDescriptors() (int, int, error) {
// GetPtyDescriptors allocates a new pseudoterminal and returns the control and
// pseudoterminal file descriptors. This implementation uses the /dev/ptmx
// device. The main advantage of using this instead of posix_openpt is that it
// avoids cgo.
func GetPtyDescriptors() (int, int, error) {
// Create a pseudo-terminal -- open a copy of the master side.
controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0o600)
if err != nil {

View File

@ -0,0 +1,13 @@
//go:build !linux && !(freebsd && cgo)
package pty
import (
"errors"
)
// GetPtyDescriptors would allocate a new pseudoterminal and return the control
// and pseudoterminal file descriptors, but pseudoterminals aren't supported on
// this platform, so it always fails.
func GetPtyDescriptors() (int, int, error) {
	err := errors.New("GetPtyDescriptors not supported on this platform")
	return -1, -1, err
}

View File

@ -6,6 +6,9 @@ const (
// external items which are downloaded for a build, typically a tarball
// being used as an additional build context.
BuildahExternalArtifactsDir = "buildah-external-artifacts"
// SourceDateEpochName is the name of the SOURCE_DATE_EPOCH environment
// variable when it's read from the environment by our main().
SourceDateEpochName = "SOURCE_DATE_EPOCH"
)
// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor

View File

@ -263,12 +263,16 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
timestamp = &t
}
if c.Flag("output").Changed {
buildOption, err := parse.GetBuildOutput(iopts.BuildOutput)
if err != nil {
return options, nil, nil, err
}
if buildOption.IsStdout {
iopts.Quiet = true
for _, buildOutput := range iopts.BuildOutputs {
// if any of these go to stdout, we need to avoid
// interspersing our random output in with it
buildOption, err := parse.GetBuildOutput(buildOutput)
if err != nil {
return options, nil, nil, err
}
if buildOption.IsStdout {
iopts.Quiet = true
}
}
}
var confidentialWorkloadOptions define.ConfidentialWorkloadOptions
@ -351,7 +355,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
Architecture: systemContext.ArchitectureChoice,
Args: args,
BlobDirectory: iopts.BlobCache,
BuildOutput: iopts.BuildOutput,
BuildOutputs: iopts.BuildOutputs,
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheTTL: cacheTTL,

View File

@ -108,7 +108,7 @@ type BudResults struct {
SkipUnusedStages bool
Stdin bool
Tag []string
BuildOutput string
BuildOutputs []string
Target string
TLSVerify bool
Jobs int
@ -307,7 +307,7 @@ newer: only pull base and SBOM scanner images when newer images exist on the r
fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])")
fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVarP(&flags.BuildOutput, "output", "o", "", "output destination (format: type=local,dest=path)")
fs.StringArrayVarP(&flags.BuildOutputs, "output", "o", nil, "output destination (format: type=local,dest=path)")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")

View File

@ -23,6 +23,7 @@ import (
"github.com/containers/buildah/internal/sbom"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/common/libnetwork/etchosts"
"github.com/containers/common/pkg/auth"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/parse"
@ -365,6 +366,9 @@ func validateExtraHost(val string) error {
if len(arr) != 2 || len(arr[0]) == 0 {
return fmt.Errorf("bad format for add-host: %q", val)
}
if arr[1] == etchosts.HostGateway {
return nil
}
if _, err := validateIPAddress(arr[1]); err != nil {
return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
@ -704,7 +708,7 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
// GetBuildOutput is responsible for parsing custom build output argument i.e `build --output` flag.
// Takes `buildOutput` as string and returns BuildOutputOption
func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
if len(buildOutput) == 1 && buildOutput == "-" {
if buildOutput == "-" {
// Feature parity with buildkit, output tar to stdout
// Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs
return define.BuildOutputOption{
@ -723,56 +727,48 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
}
isDir := true
isStdout := false
typeSelected := false
pathSelected := false
path := ""
tokens := strings.Split(buildOutput, ",")
for _, option := range tokens {
arr := strings.SplitN(option, "=", 2)
if len(arr) != 2 {
typeSelected := ""
pathSelected := ""
for _, option := range strings.Split(buildOutput, ",") {
key, value, found := strings.Cut(option, "=")
if !found {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput)
}
switch arr[0] {
switch key {
case "type":
if typeSelected {
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
if typeSelected != "" {
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", key)
}
typeSelected = true
switch arr[1] {
typeSelected = value
switch typeSelected {
case "local":
isDir = true
case "tar":
isDir = false
default:
return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput)
return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", value, buildOutput)
}
case "dest":
if pathSelected {
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
if pathSelected != "" {
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", key)
}
pathSelected = true
path = arr[1]
pathSelected = value
default:
return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput)
return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", key, buildOutput)
}
}
if !typeSelected || !pathSelected {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, accepted keys are type and dest must be present", buildOutput)
if typeSelected == "" || pathSelected == "" {
return define.BuildOutputOption{}, fmt.Errorf(`invalid build output option %q, accepted keys are "type" and "dest" must be present`, buildOutput)
}
if path == "-" {
if pathSelected == "-" {
if isDir {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput)
return define.BuildOutputOption{}, fmt.Errorf(`invalid build output option %q, "type=local" can not be used with "dest=-"`, buildOutput)
}
return define.BuildOutputOption{
Path: "",
IsDir: false,
IsStdout: true,
}, nil
}
return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
return define.BuildOutputOption{Path: pathSelected, IsDir: isDir, IsStdout: isStdout}, nil
}
// TeeType parses a string value and returns a TeeType

View File

@ -696,8 +696,9 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
return 1, fmt.Errorf("parsing container state %q from %s: %w", string(stateOutput), runtime, err)
}
switch state.Status {
case "running":
case "stopped":
case specs.StateCreating, specs.StateCreated, specs.StateRunning:
// all fine
case specs.StateStopped:
atomic.StoreUint32(&stopped, 1)
default:
return 1, fmt.Errorf("container status unexpectedly changed to %q", state.Status)

View File

@ -543,6 +543,33 @@ rootless=%d
defer b.cleanupTempVolumes()
// Handle mount flags that request that the source locations for "bind" mountpoints be
// relabeled, and filter those flags out of the list of mount options we pass to the
// runtime.
for i := range spec.Mounts {
switch spec.Mounts[i].Type {
default:
continue
case "bind", "rbind":
// all good, keep going
}
zflag := ""
for _, opt := range spec.Mounts[i].Options {
if opt == "z" || opt == "Z" {
zflag = opt
}
}
if zflag == "" {
continue
}
spec.Mounts[i].Options = slices.DeleteFunc(spec.Mounts[i].Options, func(opt string) bool {
return opt == "z" || opt == "Z"
})
if err := relabel(spec.Mounts[i].Source, b.MountLabel, zflag == "z"); err != nil {
return fmt.Errorf("setting file label %q on %q: %w", b.MountLabel, spec.Mounts[i].Source, err)
}
}
switch isolation {
case define.IsolationOCI:
var moreCreateArgs []string
@ -1139,16 +1166,19 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
if err := relabel(host, mountLabel, true); err != nil {
return specs.Mount{}, err
}
options = slices.DeleteFunc(options, func(o string) bool { return o == "z" })
}
if foundZ {
if err := relabel(host, mountLabel, false); err != nil {
return specs.Mount{}, err
}
options = slices.DeleteFunc(options, func(o string) bool { return o == "Z" })
}
if foundU {
if err := chown.ChangeHostPathOwnership(host, true, idMaps.processUID, idMaps.processGID); err != nil {
return specs.Mount{}, err
}
options = slices.DeleteFunc(options, func(o string) bool { return o == "U" })
}
if foundO {
if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") {

View File

@ -25,8 +25,8 @@ before_install:
- docker pull centos:7
- docker pull alpine
- docker pull registry.fedoraproject.org/fedora-minimal
- docker pull registry.fedoraproject.org/fedora-minimal:41-x86_64
- docker pull registry.fedoraproject.org/fedora-minimal:41-aarch64
- docker pull registry.fedoraproject.org/fedora-minimal:42-x86_64
- docker pull registry.fedoraproject.org/fedora-minimal:42-aarch64
- chmod -R go-w ./dockerclient/testdata
script:

View File

@ -106,8 +106,8 @@ docker rmi busybox; docker pull busybox
docker rmi alpine; docker pull alpine
docker rmi centos:7; docker pull centos:7
docker rmi registry.fedoraproject.org/fedora-minimal; docker pull registry.fedoraproject.org/fedora-minimal
docker rmi registry.fedoraproject.org/fedora-minimal:41-x86_64; docker pull registry.fedoraproject.org/fedora-minimal:41-x86_64
docker rmi registry.fedoraproject.org/fedora-minimal:41-aarch64; docker pull registry.fedoraproject.org/fedora-minimal:41-aarch64
docker rmi registry.fedoraproject.org/fedora-minimal:42-x86_64; docker pull registry.fedoraproject.org/fedora-minimal:42-x86_64
docker rmi registry.fedoraproject.org/fedora-minimal:42-aarch64; docker pull registry.fedoraproject.org/fedora-minimal:42-aarch64
chmod -R go-w ./dockerclient/testdata
go test ./dockerclient -tags conformance -timeout 30m
```

View File

@ -2,6 +2,7 @@ package imagebuilder
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
@ -9,6 +10,7 @@ import (
"os"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
@ -288,6 +290,55 @@ type Stage struct {
}
func NewStages(node *parser.Node, b *Builder) (Stages, error) {
getStageFrom := func(stageIndex int, root *parser.Node) (from string, as string, err error) {
for _, child := range root.Children {
if !strings.EqualFold(child.Value, command.From) {
continue
}
if child.Next == nil {
return "", "", errors.New("FROM requires an argument")
}
if child.Next.Value == "" {
return "", "", errors.New("FROM requires a non-empty argument")
}
from = child.Next.Value
if name, ok := extractNameFromNode(child); ok {
as = name
}
return from, as, nil
}
return "", "", fmt.Errorf("stage %d requires a FROM instruction (%q)", stageIndex+1, root.Original)
}
argInstructionsInStages := make(map[string][]string)
setStageInheritedArgs := func(s *Stage) error {
from, as, err := getStageFrom(s.Position, s.Node)
if err != nil {
return err
}
inheritedArgs := argInstructionsInStages[from]
thisStageArgs := slices.Clone(inheritedArgs)
for _, child := range s.Node.Children {
if !strings.EqualFold(child.Value, command.Arg) {
continue
}
if child.Next == nil {
return errors.New("ARG requires an argument")
}
if child.Next.Value == "" {
return errors.New("ARG requires a non-empty argument")
}
next := child.Next
for next != nil {
thisStageArgs = append(thisStageArgs, next.Value)
next = next.Next
}
}
if as != "" {
argInstructionsInStages[as] = thisStageArgs
}
argInstructionsInStages[strconv.Itoa(s.Position)] = thisStageArgs
return arg(s.Builder, inheritedArgs, nil, nil, "", nil)
}
var stages Stages
var headingArgs []string
if err := b.extractHeadingArgsFromNode(node); err != nil {
@ -297,8 +348,8 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) {
headingArgs = append(headingArgs, k)
}
for i, root := range SplitBy(node, command.From) {
name, _ := extractNameFromNode(root.Children[0])
if len(name) == 0 {
name, hasName := extractNameFromNode(root.Children[0])
if !hasName {
name = strconv.Itoa(i)
}
filteredUserArgs := make(map[string]string)
@ -317,12 +368,16 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) {
if err != nil {
return nil, err
}
stages = append(stages, Stage{
stage := Stage{
Position: i,
Name: processedName,
Builder: b.builderForStage(headingArgs),
Node: root,
})
}
if err := setStageInheritedArgs(&stage); err != nil {
return nil, err
}
stages = append(stages, stage)
}
return stages, nil
}

View File

@ -12,7 +12,7 @@
#
%global golang_version 1.19
%{!?version: %global version 1.2.16-dev}
%{!?version: %global version 1.2.16}
%{!?release: %global release 1}
%global package_name imagebuilder
%global product_name Container Image Builder

5
vendor/modules.txt vendored
View File

@ -111,7 +111,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.7.1
## explicit; go 1.23.0
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1
# github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30
## explicit; go 1.23.3
github.com/containers/buildah
github.com/containers/buildah/bind
@ -126,6 +126,7 @@ github.com/containers/buildah/internal/mkcw
github.com/containers/buildah/internal/mkcw/types
github.com/containers/buildah/internal/open
github.com/containers/buildah/internal/parse
github.com/containers/buildah/internal/pty
github.com/containers/buildah/internal/sbom
github.com/containers/buildah/internal/tmpdir
github.com/containers/buildah/internal/util
@ -835,7 +836,7 @@ github.com/opencontainers/runtime-tools/validate/capabilities
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8
# github.com/openshift/imagebuilder v1.2.16
## explicit; go 1.22.0
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/command