diff --git a/go.mod b/go.mod index 98cca6453e..e2d7498a60 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/checkpoint-restore/checkpointctl v1.3.0 github.com/checkpoint-restore/go-criu/v7 v7.2.0 github.com/containernetworking/plugins v1.7.1 - github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30 + github.com/containers/buildah v1.41.0 github.com/containers/common v0.64.0 github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.8.6 @@ -98,7 +98,7 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containernetworking/cni v1.3.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect - github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be // indirect + github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194 // indirect github.com/coreos/go-oidc/v3 v3.14.1 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect @@ -110,7 +110,7 @@ require ( github.com/ebitengine/purego v0.8.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fsouza/go-dockerclient v1.12.0 // indirect + github.com/fsouza/go-dockerclient v1.12.1 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -137,7 +137,7 @@ require ( github.com/mdlayher/socket v0.5.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect - github.com/moby/buildkit v0.22.0 // indirect + github.com/moby/buildkit v0.23.2 // indirect github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect diff --git a/go.sum b/go.sum index fe0fbfa062..91a8c6d054 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEm github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs= github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0= -github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30 h1:kCt0fnVBvXY9J98pUDeUc0gHKrhRwaBTWWD3otLutCE= -github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30/go.mod h1:QDecwvjrr+e0VD5GYv2dw7tsiqrz673r8B4rIYFP11Y= +github.com/containers/buildah v1.41.0 h1:GU350UeX6BkZrgCE3SB/d1Hu4xBaHUX07ayiJTvJD54= +github.com/containers/buildah v1.41.0/go.mod h1:1Ds26B4E4Z3NeLdi3xjjk8S72KVv2/xiFYYpwfFDgXI= github.com/containers/common v0.64.0 h1:Jdjq1e5tqrLov9tcAVc/AfvQCgX4krhcfDBgOXwrSfw= github.com/containers/common v0.64.0/go.mod h1:bq2UIiFP8vUJdgM+WN8E8jkD7wF69SpDRGzU7epJljg= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= @@ -76,8 +76,8 @@ github.com/containers/libhvee v0.10.0 h1:7VLv8keWZpHuGmWvyY4c1mVH5V1JYb1G78VC+8A github.com/containers/libhvee v0.10.0/go.mod h1:at0h8lRcK5jCKfQgU/e6Io0Mw12F36zRLjXVOXRoDTM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/luksy 
v0.0.0-20250408185436-4bb4c3f825be h1:6E1dD/4g8Kq04jvI5BVqUIx2Z5Nl6+dqYuX9syTVZjI= -github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be/go.mod h1:xY4YWmawqtrpLFV7mYSkHfFDwEO+6Fo0bT18Xnsg7M0= +github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194 h1:mm+XFgCXPx3pFFkFJ0CH6KgX1os5jfrD/T6S/6ht4FE= +github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194/go.mod h1:ab2XWZtMgybWBznSwo8BEPeIeSpspKh+wlnkq/UY2Uo= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= @@ -144,8 +144,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU= -github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo= +github.com/fsouza/go-dockerclient v1.12.1 h1:FMoLq+Zhv9Oz/rFmu6JWkImfr6CBgZOPcL+bHW4gS0o= +github.com/fsouza/go-dockerclient v1.12.1/go.mod h1:OqsgJJcpCwqyM3JED7TdfM9QVWS5O7jSYwXxYKmOooY= github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -213,8 +213,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -286,8 +286,8 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/moby/buildkit v0.22.0 h1:aWN06w1YGSVN1XfeZbj2ZbgY+zi5xDAjEFI8Cy9fTjA= -github.com/moby/buildkit v0.22.0/go.mod h1:j4pP5hxiTWcz7xuTK2cyxQislHl/N2WWHzOy43DlLJw= +github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ= +github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= 
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= @@ -476,8 +476,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRND go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= @@ -488,8 +488,8 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 09021936e5..6485e628f2 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,119 @@ # Changelog +## v1.41.0 (2025-07-16) + + Bump to c/storage v1.59.0, c/image v5.36.0, ... 
c/common v0.64.0 + stage_executor: check platform of cache candidates + fix(deps): update module golang.org/x/crypto to v0.40.0 + fix(deps): update module golang.org/x/term to v0.33.0 + fix(deps): update module golang.org/x/sync to v0.16.0 + fix(deps): update module github.com/docker/docker to v28.3.2+incompatible + ADD/COPY --link support added + RPM/TMT: account for passwd binary moving to tests + buildah: move passwd command to tests + Update "bud with --cpu-shares" test, and rename it + Remove BUILDTAG btrfs_noversion as no longer effective + fix(deps): update module github.com/docker/docker to v28.3.1+incompatible + fix(deps): update module github.com/moby/buildkit to v0.23.2 + fix(deps): update github.com/containers/luksy digest to bc60f96 + chore(typos): fix typos + vendor: update c/{common,image,storage} to main + chore(deps): update module github.com/go-viper/mapstructure/v2 to v2.3.0 [security] + fix(deps): update module go.etcd.io/bbolt to v1.4.2 + Update Neil Smith's GitHub username in MAINTAINERS.md + Accept SOURCE_DATE_EPOCH as a build-arg + fix(deps): update module github.com/docker/docker to v28.3.0+incompatible + Add conditional release-checking system test + info,inspect: use the "formats" package to get some builtins + Use containers/common's formats package instead of our own + build, commit: set the OCI ...created annotation on OCI images + commit: exclude parents of mount targets, too + run: clean up parents of mount targets, too + tarFilterer: always flush after writing + Builder: drop the TempVolumes field + Update module github.com/moby/buildkit to v0.23.1 + Update module github.com/opencontainers/cgroups to v0.0.3 + Add CommitOptions.OmitLayerHistoryEntry, for skipping the new bits + Update module github.com/fsouza/go-dockerclient to v1.12.1 + conformance: use mirrored frontend and base images + commit-with-extra-files test: use $TEST_SCRATCH_DIR + fix(deps): update module github.com/moby/buildkit to v0.23.0 + "root fs only mounted once" test: accept root with only the rw option + Run with --device /dev/fuse and not just -v /dev/fuse:/dev/fuse + CI: pass $BUILDAH_RUNTIME through to in-container test runs + CI: ensure rootless groups aren't duplicates + build: add support for --inherit-annotations + CI: give the rootless test user some supplemental groups + bud,run: runc does not support keep-groups + Fix lint issue in TestCommitCompression + Add a unit test for compression types in OCI images + Support zstd compression in image commit + fix(deps): update module go.etcd.io/bbolt to v1.4.1 + rpm: build rpm with libsqlite3 tag + Makefile: use libsqlite3 build when possible + commit,build: --source-date-epoch/--timestamp omit identity label + docs: add --setopt "*.countme=false" to dnf examples + Builder.sbomScan(): don't break non-root scanners + build: --source-date-epoch/--timestamp use static hostname/cid + fix(deps): update module golang.org/x/crypto to v0.39.0 + fix(deps): update module golang.org/x/sync to v0.15.0 + build: add --source-date-epoch and --rewrite-timestamp flags + build,config: add support for --unsetannotation + commit: add --source-date-epoch and --rewrite-timestamp flags + fix(deps): update module github.com/openshift/imagebuilder to v1.2.16 + vendor latest c/{common,image,storage} + Tweak our handling of variant values, again + Don't BuildRequires: ostree-devel + parse, validateExtraHost: honor Hostgateway in format + remove static nix build + Ensure extendedGlob returns paths in lexical order + CI: run integration tests on Fedora with both 
crun and runc + buildah-build(1): clarify that --cgroup-parent affects RUN instructions + runUsingRuntime: use named constants for runtime states + Add a dummy "runtime" that just dumps its config file + run: handle relabeling bind mounts ourselves + fix link to Maintainers file + Update to avoid deprecated types + fix(deps): update module github.com/docker/docker to v28.2.0+incompatible + [skip-ci] Packit: cleanup redundant targets and unused anchors + [skip-ci] Packit: set fedora-all after F40 EOL + Use Fedora 42 instead of 41 in that one conformance test + [CI:DOCS] README.md: add openssf passing badge + fix(deps): update module github.com/moby/buildkit to v0.22.0 + copier: add Ensure and ConditionalRemove + [CI:DOCS] update a couple of lists in the build man page + build: allow --output to be specified multiple times + add: add a new --timestamp flag + tests/helpers.bash: add some helpers for parsing images + pkg/parse.GetBuildOutput(): use strings.Cut() + [skip-ci] Packit: Disable osh_diff_scan + internal/util.SetHas(): handle maps of [generic]generic + Refactor NewImageSource to add a manifest type abstraction (#5743) + [skip-ci] Packit: Ignore ELN and CentOS Stream jobs + imagebuildah: select most recent layer for cache + [CI:DOCS] Add CNCF roadmap, touchup other CNCF files + fix(deps): update module golang.org/x/crypto to v0.38.0 + Fix typo in comment (#6167) + Support label_users in buildah + fix(deps): update module golang.org/x/sync to v0.14.0 + fix(deps): update github.com/containers/luksy digest to 4bb4c3f + test/serve: fix a descriptor leak, add preliminary directory support + fix(deps): update module github.com/opencontainers/cgroups to v0.0.2 + fix(deps): update module github.com/moby/buildkit to v0.21.1 + Update to avoid deprecated types + fix(deps): update module github.com/opencontainers/runc to v1.3.0 + Only filter if containerImageRef.created != nil + Drop superfluous cast + Remove UID/GID scrubbing. + fix(deps): update module github.com/seccomp/libseccomp-golang to v0.11.0 + cirrus: turn prior fedora testing back on + chore(deps): update dependency containers/automation_images to v20250422 + fix(deps): update module github.com/docker/docker to v28.1.1+incompatible + Bump to Buildah v1.41.0-dev + CI vendor_task: pin to go 1.23.3 for now + fix(deps): update module github.com/containers/common to v0.63.0 + ## v1.40.0 (2025-04-17) Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0 diff --git a/vendor/github.com/containers/buildah/MAINTAINERS.md b/vendor/github.com/containers/buildah/MAINTAINERS.md index a74b519df4..bf585df890 100644 --- a/vendor/github.com/containers/buildah/MAINTAINERS.md +++ b/vendor/github.com/containers/buildah/MAINTAINERS.md @@ -13,7 +13,7 @@ describes the project's governance and the Project Roles used below. 
| Paul Holzinger | [Luap99](https://github.com/Luap99) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) | | Giuseppe Scrivano | [giuseppe](https://github.com/giuseppe) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) | | Miloslav Trmač | [mtrmac](https://github.com/mtrmac) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) | -| Neil Smith | [Neil-Smith](https://github.com/Neil-Smith) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) | +| Neil Smith | [actionmancan](https://github.com/actionmancan) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) | | Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) | | Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) | | Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) | diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 32a09eced2..fd3d24ef6f 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -1,9 +1,9 @@ export GOPROXY=https://proxy.golang.org APPARMORTAG := $(shell hack/apparmor_tag.sh) -STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh) +STORAGETAGS := $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) -TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) $(shell ./hack/sqlite_tag.sh) ifeq ($(shell uname -s),FreeBSD) # FreeBSD needs CNI until netavark is supported TAGS += cni @@ -59,7 +59,7 @@ export GOLANGCI_LINT_VERSION := 2.1.0 # Note: Uses the -N -l go compiler options to disable compiler optimizations # and inlining. Using these build options allows you to subsequently # use source debugging tools like delve. 
-all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec docs +all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec bin/passwd docs bin/buildah: $(SOURCES) internal/mkcw/embed/entrypoint_amd64.gz $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah @@ -106,9 +106,12 @@ bin/tutorial: $(SOURCES) bin/inet: tests/inet/inet.go $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/inet/inet.go +bin/passwd: tests/passwd/passwd.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/passwd/passwd.go + .PHONY: clean clean: - $(RM) -r bin tests/testreport/testreport + $(RM) -r bin tests/testreport/testreport tests/conformance/testdata/mount-targets/true $(MAKE) -C docs clean .PHONY: docs @@ -146,7 +149,7 @@ install.completions: install -m 644 contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah .PHONY: test-conformance -test-conformance: +test-conformance: tests/conformance/testdata/mount-targets/true $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 60m ./tests/conformance .PHONY: test-integration @@ -156,6 +159,9 @@ test-integration: install.tools tests/testreport/testreport: tests/testreport/testreport.go $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go +tests/conformance/testdata/mount-targets/true: tests/conformance/testdata/mount-targets/true.go + $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -o tests/conformance/testdata/mount-targets/true tests/conformance/testdata/mount-targets/true.go + .PHONY: test-unit test-unit: tests/testreport/testreport $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
| grep -v vendor | grep -v tests | grep -v cmd | grep -v chroot | grep -v copier) -timeout 45m @@ -166,8 +172,8 @@ test-unit: tests/testreport/testreport vendor-in-container: goversion=$(shell sed -e '/^go /!d' -e '/^go /s,.* ,,g' go.mod) ; \ - if test -d `go env GOCACHE` && test -w `go env GOCACHE` ; then \ - podman run --privileged --rm --env HOME=/root -v `go env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \ + if test -d `$(GO) env GOCACHE` && test -w `$(GO) env GOCACHE` ; then \ + podman run --privileged --rm --env HOME=/root -v `$(GO) env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \ else \ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \ fi @@ -177,7 +183,7 @@ vendor: $(GO) mod tidy $(GO) mod vendor $(GO) mod verify - if test -n "$(strip $(shell go env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi + if test -n "$(strip $(shell $(GO) env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi .PHONY: lint lint: install.tools diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 9a937944da..2a88382cd0 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -33,6 +33,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/moby/sys/userns" digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -102,6 +103,15 @@ type AddAndCopyOptions struct { Parents bool // Timestamp is a timestamp to override on all content as it is being read. Timestamp *time.Time + // Link, when set to true, creates an independent layer containing the copied content + // that sits on top of existing layers. This layer can be cached and reused + // separately, and is not affected by filesystem changes from previous instructions. + Link bool + // BuildMetadata is consulted only when Link is true. Contains metadata used by + // imagebuildah for cache evaluation of linked layers (inheritLabels, unsetAnnotations, + // inheritAnnotations, newAnnotations). This field is internally managed and should + // not be set by external API users. + BuildMetadata string } // gitURLFragmentSuffix matches fragments to use as Git reference and build @@ -495,15 +505,75 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - // Create the target directory if it doesn't exist yet. 
+ var putRoot, putDir, stagingDir string + var createdDirs []string + var latestTimestamp time.Time + mkdirOptions := copier.MkdirOptions{ UIDMap: destUIDMap, GIDMap: destGIDMap, ChownNew: chownDirs, } - if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil { - return fmt.Errorf("ensuring target directory exists: %w", err) + + // If --link is specified, we create a staging directory to hold the content + // that will then become an independent layer + if options.Link { + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return fmt.Errorf("getting container directory for %q: %w", b.ContainerID, err) + } + + stagingDir, err = os.MkdirTemp(containerDir, "link-stage-") + if err != nil { + return fmt.Errorf("creating staging directory for link %q: %w", b.ContainerID, err) + } + + putRoot = stagingDir + + cleanDest := filepath.Clean(destination) + + if strings.Contains(cleanDest, "..") { + return fmt.Errorf("invalid destination path %q: contains path traversal", destination) + } + + if renameTarget != "" { + putDir = filepath.Dir(filepath.Join(stagingDir, cleanDest)) + } else { + putDir = filepath.Join(stagingDir, cleanDest) + } + + putDirAbs, err := filepath.Abs(putDir) + if err != nil { + return fmt.Errorf("failed to resolve absolute path: %w", err) + } + + stagingDirAbs, err := filepath.Abs(stagingDir) + if err != nil { + return fmt.Errorf("failed to resolve staging directory absolute path: %w", err) + } + + if !strings.HasPrefix(putDirAbs, stagingDirAbs+string(os.PathSeparator)) && putDirAbs != stagingDirAbs { + return fmt.Errorf("destination path %q escapes staging directory", destination) + } + if err := copier.Mkdir(putRoot, putDirAbs, mkdirOptions); err != nil { + return fmt.Errorf("ensuring target directory exists: %w", err) + } + tempPath := putDir + for tempPath != stagingDir && tempPath != filepath.Dir(tempPath) { + if _, err := os.Stat(tempPath); err == nil { + createdDirs = append(createdDirs, tempPath) + } + tempPath = filepath.Dir(tempPath) + } + } else { + if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil { + return fmt.Errorf("ensuring target directory exists: %w", err) + } + + putRoot = extractDirectory + putDir = extractDirectory } + // Copy each source in turn. 
for _, src := range sources { var multiErr *multierror.Error @@ -580,7 +650,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodFiles: nil, IgnoreDevices: userns.RunningInUserNS(), } - putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) + putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher)) } hashCloser.Close() pipeReader.Close() @@ -658,6 +728,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption itemsCopied++ } st := localSourceStat.Results[globbed] + if options.Link && st.ModTime.After(latestTimestamp) { + latestTimestamp = st.ModTime + } pipeReader, pipeWriter := io.Pipe() wg.Add(1) go func() { @@ -741,12 +814,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodFiles: nil, IgnoreDevices: userns.RunningInUserNS(), } - putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) + putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher)) } hashCloser.Close() pipeReader.Close() wg.Done() }() + wg.Wait() if getErr != nil { getErr = fmt.Errorf("reading %q: %w", src, getErr) @@ -776,6 +850,58 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT) } } + + if options.Link { + if !latestTimestamp.IsZero() { + for _, dir := range createdDirs { + if err := os.Chtimes(dir, latestTimestamp, latestTimestamp); err != nil { + logrus.Warnf("failed to set timestamp on directory %q: %v", dir, err) + } + } + } + var created time.Time + if options.Timestamp != nil { + created = *options.Timestamp + } else if !latestTimestamp.IsZero() { + created = latestTimestamp + } else { + created = time.Unix(0, 0).UTC() + } + + command := "ADD" + if !extract { + command = "COPY" + } + + contentType, digest := b.ContentDigester.Digest() + summary := contentType + if digest != "" { + if summary != "" { + summary = summary + ":" + } + summary = summary + digest.Encoded() + logrus.Debugf("added content from --link %s", summary) + } + + createdBy := "/bin/sh -c #(nop) " + command + " --link " + summary + " in " + destination + " " + options.BuildMetadata + history := v1.History{ + Created: &created, + CreatedBy: createdBy, + Comment: b.HistoryComment(), + } + + linkedLayer := LinkedLayer{ + History: history, + BlobPath: stagingDir, + } + + b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, linkedLayer) + + if err := b.Save(); err != nil { + return fmt.Errorf("saving builder state after queuing linked layer: %w", err) + } + } + return nil } diff --git a/vendor/github.com/containers/buildah/btrfs_tag.sh b/vendor/github.com/containers/buildah/btrfs_tag.sh deleted file mode 100644 index ea753d4d02..0000000000 --- a/vendor/github.com/containers/buildah/btrfs_tag.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF -#include -EOF -if test $? 
-ne 0 ; then - echo btrfs_noversion -fi diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index dcadc46f26..643b823b07 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -180,6 +180,7 @@ type Builder struct { // Format to use for a container image we eventually commit, when we do. Format string // TempVolumes are temporary mount points created during Run() calls. + // Deprecated: do not use. TempVolumes map[string]bool // ContentDigester counts the digest of all Add()ed content since it was // last restarted. diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 182a6afea5..f4a39e739a 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,115 @@ +- Changelog for v1.41.0 (2025-07-16) + * Bump to c/storage v1.59.0, c/image v5.36.0, ... c/common v0.64.0 + * stage_executor: check platform of cache candidates + * fix(deps): update module golang.org/x/crypto to v0.40.0 + * fix(deps): update module golang.org/x/term to v0.33.0 + * fix(deps): update module golang.org/x/sync to v0.16.0 + * fix(deps): update module github.com/docker/docker to v28.3.2+incompatible + * ADD/COPY --link support added + * RPM/TMT: account for passwd binary moving to tests + * buildah: move passwd command to tests + * Update "bud with --cpu-shares" test, and rename it + * Remove BUILDTAG btrfs_noversion as no longer effective + * fix(deps): update module github.com/docker/docker to v28.3.1+incompatible + * fix(deps): update module github.com/moby/buildkit to v0.23.2 + * fix(deps): update github.com/containers/luksy digest to bc60f96 + * chore(typos): fix typos + * vendor: update c/{common,image,storage} to main + * chore(deps): update module github.com/go-viper/mapstructure/v2 to v2.3.0 [security] + * fix(deps): update module go.etcd.io/bbolt to v1.4.2 + * Update Neil Smith's GitHub username in MAINTAINERS.md + * Accept SOURCE_DATE_EPOCH as a build-arg + * fix(deps): update module github.com/docker/docker to v28.3.0+incompatible + * Add conditional release-checking system test + * info,inspect: use the "formats" package to get some builtins + * Use containers/common's formats package instead of our own + * build, commit: set the OCI ...created annotation on OCI images + * commit: exclude parents of mount targets, too + * run: clean up parents of mount targets, too + * tarFilterer: always flush after writing + * Builder: drop the TempVolumes field + * Update module github.com/moby/buildkit to v0.23.1 + * Update module github.com/opencontainers/cgroups to v0.0.3 + * Add CommitOptions.OmitLayerHistoryEntry, for skipping the new bits + * Update module github.com/fsouza/go-dockerclient to v1.12.1 + * conformance: use mirrored frontend and base images + * commit-with-extra-files test: use $TEST_SCRATCH_DIR + * fix(deps): update module github.com/moby/buildkit to v0.23.0 + * "root fs only mounted once" test: accept root with only the rw option + * Run with --device /dev/fuse and not just -v /dev/fuse:/dev/fuse + * CI: pass $BUILDAH_RUNTIME through to in-container test runs + * CI: ensure rootless groups aren't duplicates + * build: add support for --inherit-annotations + * CI: give the rootless test user some supplemental groups + * bud,run: runc does not support keep-groups + * Fix lint issue in TestCommitCompression + * Add a unit test for compression types 
in OCI images + * Support zstd compression in image commit + * fix(deps): update module go.etcd.io/bbolt to v1.4.1 + * rpm: build rpm with libsqlite3 tag + * Makefile: use libsqlite3 build when possible + * commit,build: --source-date-epoch/--timestamp omit identity label + * docs: add --setopt "*.countme=false" to dnf examples + * Builder.sbomScan(): don't break non-root scanners + * build: --source-date-epoch/--timestamp use static hostname/cid + * fix(deps): update module golang.org/x/crypto to v0.39.0 + * fix(deps): update module golang.org/x/sync to v0.15.0 + * build: add --source-date-epoch and --rewrite-timestamp flags + * build,config: add support for --unsetannotation + * commit: add --source-date-epoch and --rewrite-timestamp flags + * fix(deps): update module github.com/openshift/imagebuilder to v1.2.16 + * vendor latest c/{common,image,storage} + * Tweak our handling of variant values, again + * Don't BuildRequires: ostree-devel + * parse, validateExtraHost: honor Hostgateway in format + * remove static nix build + * Ensure extendedGlob returns paths in lexical order + * CI: run integration tests on Fedora with both crun and runc + * buildah-build(1): clarify that --cgroup-parent affects RUN instructions + * runUsingRuntime: use named constants for runtime states + * Add a dummy "runtime" that just dumps its config file + * run: handle relabeling bind mounts ourselves + * fix link to Maintainers file + * Update to avoid deprecated types + * fix(deps): update module github.com/docker/docker to v28.2.0+incompatible + * [skip-ci] Packit: cleanup redundant targets and unused anchors + * [skip-ci] Packit: set fedora-all after F40 EOL + * Use Fedora 42 instead of 41 in that one conformance test + * [CI:DOCS] README.md: add openssf passing badge + * fix(deps): update module github.com/moby/buildkit to v0.22.0 + * copier: add Ensure and ConditionalRemove + * [CI:DOCS] update a couple of lists in the build man page + * build: allow --output to be specified multiple times + * add: add a new --timestamp flag + * tests/helpers.bash: add some helpers for parsing images + * pkg/parse.GetBuildOutput(): use strings.Cut() + * [skip-ci] Packit: Disable osh_diff_scan + * internal/util.SetHas(): handle maps of [generic]generic + * Refactor NewImageSource to add a manifest type abstraction (#5743) + * [skip-ci] Packit: Ignore ELN and CentOS Stream jobs + * imagebuildah: select most recent layer for cache + * [CI:DOCS] Add CNCF roadmap, touchup other CNCF files + * fix(deps): update module golang.org/x/crypto to v0.38.0 + * Fix typo in comment (#6167) + * Support label_users in buildah + * fix(deps): update module golang.org/x/sync to v0.14.0 + * fix(deps): update github.com/containers/luksy digest to 4bb4c3f + * test/serve: fix a descriptor leak, add preliminary directory support + * fix(deps): update module github.com/opencontainers/cgroups to v0.0.2 + * fix(deps): update module github.com/moby/buildkit to v0.21.1 + * Update to avoid deprecated types + * fix(deps): update module github.com/opencontainers/runc to v1.3.0 + * Only filter if containerImageRef.created != nil + * Drop superfluous cast + * Remove UID/GID scrubbing. 
+ * fix(deps): update module github.com/seccomp/libseccomp-golang to v0.11.0 + * cirrus: turn prior fedora testing back on + * chore(deps): update dependency containers/automation_images to v20250422 + * fix(deps): update module github.com/docker/docker to v28.1.1+incompatible + * Bump to Buildah v1.41.0-dev + * CI vendor_task: pin to go 1.23.3 for now + * fix(deps): update module github.com/containers/common to v0.63.0 + - Changelog for v1.40.0 (2025-04-17) * Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0 * fix(deps): update module github.com/docker/docker to v28.1.0+incompatible diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 1905d4390b..c78c012043 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -30,9 +30,10 @@ import ( ) const ( - // BuilderIdentityAnnotation is the name of the annotation key containing - // the name and version of the producer of the image stored as an - // annotation on commit. + // BuilderIdentityAnnotation is the name of the label which will be set + // to contain the name and version of the producer of the image at + // commit-time. (N.B. yes, the constant's name includes "Annotation", + // but it's added as a label.) BuilderIdentityAnnotation = "io.buildah.version" ) @@ -93,9 +94,18 @@ type CommitOptions struct { // EmptyLayer tells the builder to omit the diff for the working // container. EmptyLayer bool + // OmitLayerHistoryEntry tells the builder to omit the diff for the + // working container and to not add an entry in the commit history. By + // default, the rest of the image's history is preserved, subject to + // the OmitHistory setting. N.B.: setting this flag, without any + // PrependedEmptyLayers, AppendedEmptyLayers, PrependedLinkedLayers, or + // AppendedLinkedLayers will more or less produce a copy of the base + // image. + OmitLayerHistoryEntry bool // OmitTimestamp forces epoch 0 as created timestamp to allow for // deterministic, content-addressable builds. - // Deprecated use HistoryTimestamp instead. + // Deprecated: use HistoryTimestamp or SourceDateEpoch (possibly with + // RewriteTimestamp) instead. OmitTimestamp bool // SignBy is the fingerprint of a GPG key to use for signing the image. SignBy string @@ -121,7 +131,8 @@ type CommitOptions struct { // contents of a rootfs. ConfidentialWorkloadOptions ConfidentialWorkloadOptions // UnsetEnvs is a list of environments to not add to final image. - // Deprecated: use UnsetEnv() before committing instead. + // Deprecated: use UnsetEnv() before committing, or set OverrideChanges + // instead. UnsetEnvs []string // OverrideConfig is an optional Schema2Config which can override parts // of the working container's configuration for the image that is being @@ -145,6 +156,11 @@ type CommitOptions struct { // the image in Docker format. Newer BuildKit-based builds don't set // this field. CompatSetParent types.OptionalBool + // CompatLayerOmissions causes the "/dev", "/proc", and "/sys" + // directories to be omitted from the layer diff and related output, as + // the classic builder did. Newer BuildKit-based builds include them + // in the built image by default. 
+ CompatLayerOmissions types.OptionalBool // PrependedLinkedLayers and AppendedLinkedLayers are combinations of // history entries and locations of either directory trees (if // directories, per os.Stat()) or uncompressed layer blobs which should @@ -153,6 +169,15 @@ type CommitOptions struct { // corresponding members in the Builder object, in the committed image // is not guaranteed. PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer + // UnsetAnnotations is a list of annotations (names only) to withhold + // from the image. + UnsetAnnotations []string + // Annotations is a list of annotations (in the form "key=value") to + // add to the image. + Annotations []string + // CreatedAnnotation controls whether or not an "org.opencontainers.image.created" + // annotation is present in the output image. + CreatedAnnotation types.OptionalBool } // LinkedLayer combines a history entry with the location of either a directory @@ -300,7 +325,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options // work twice. if options.OmitTimestamp { if options.HistoryTimestamp != nil { - return imgID, nil, "", fmt.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together") + return imgID, nil, "", fmt.Errorf("OmitTimestamp and HistoryTimestamp can not be used together") } timestamp := time.Unix(0, 0).UTC() options.HistoryTimestamp = ×tamp diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index e3a6a6d243..7ca9d0d995 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -49,7 +49,8 @@ type CommonBuildOptions struct { CPUSetMems string // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container. HTTPProxy bool - // IdentityLabel if set ensures that default `io.buildah.version` label is not applied to build image. + // IdentityLabel if set controls whether or not a `io.buildah.version` label is added to the built image. + // Setting this to false does not clear the label if it would be inherited from the base image. IdentityLabel types.OptionalBool // Memory is the upper limit (in bytes) on how much memory running containers can use. Memory int64 @@ -242,6 +243,9 @@ type BuildOptions struct { // InheritLabels controls whether or not built images will retain the labels // which were set in their base images InheritLabels types.OptionalBool + // InheritAnnotations controls whether or not built images will retain the annotations + // which were set in their base images + InheritAnnotations types.OptionalBool // AddCapabilities is a list of capabilities to add to the default set when // handling RUN instructions. AddCapabilities []string @@ -295,9 +299,23 @@ type BuildOptions struct { SignBy string // Architecture specifies the target architecture of the image to be built. Architecture string - // Timestamp sets the created timestamp to the specified time, allowing - // for deterministic, content-addressable builds. + // Timestamp specifies a timestamp to use for the image's created-on + // date, the corresponding field in new history entries, the timestamps + // to set on contents in new layer diffs, and the timestamps to set on + // contents written as specified in the BuildOutput field. If left + // unset, the current time is used for the configuration and manifest, + // and layer contents are recorded as-is. 
Timestamp *time.Time + // SourceDateEpoch specifies a timestamp to use for the image's + // created-on date and the corresponding field in new history entries, + // and any content written as specified in the BuildOutput field. If + // left unset, the current time is used for the configuration and + // manifest, and layer and BuildOutput contents retain their original + // timestamps. + SourceDateEpoch *time.Time + // RewriteTimestamp, if set, forces timestamps in generated layers to + // not be later than the SourceDateEpoch, if it is also set. + RewriteTimestamp bool // OS is the specifies the operating system of the image to be built. OS string // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one @@ -340,6 +358,8 @@ type BuildOptions struct { UnsetEnvs []string // UnsetLabels is a list of labels to not add to final image from base image. UnsetLabels []string + // UnsetAnnotations is a list of annotations to not add to final image from base image. + UnsetAnnotations []string // Envs is a list of environment variables to set in the final image. Envs []string // OSFeatures specifies operating system features the image requires. @@ -389,6 +409,13 @@ type BuildOptions struct { // provides a minimal initial configuration with a working directory // set in it. CompatScratchConfig types.OptionalBool + // CompatLayerOmissions causes the "/dev", "/proc", and "/sys" + // directories to be omitted from the image and related output. Newer + // BuildKit-based builds include them in the built image by default. + CompatLayerOmissions types.OptionalBool // NoPivotRoot inhibits the usage of pivot_root when setting up the rootfs NoPivotRoot bool + // CreatedAnnotation controls whether or not an "org.opencontainers.image.created" + // annotation is present in the output image. + CreatedAnnotation types.OptionalBool } diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index e712e3c2d8..6bb2cef00a 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,7 +29,7 @@ const ( // identify working containers. Package = "buildah" // Version for the Package. Also used by .packit.sh for Packit builds. - Version = "1.41.0-dev" + Version = "1.41.0" // DefaultRuntime if containers.conf fails. 
DefaultRuntime = "runc" diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go index caa85f930d..65e115699c 100644 --- a/vendor/github.com/containers/buildah/digester.go +++ b/vendor/github.com/containers/buildah/digester.go @@ -61,7 +61,14 @@ type tarFilterer struct { } func (t *tarFilterer) Write(p []byte) (int, error) { - return t.pipeWriter.Write(p) + n, err := t.pipeWriter.Write(p) + if err != nil { + t.closedLock.Lock() + closed := t.closed + t.closedLock.Unlock() + err = fmt.Errorf("writing to tar filter pipe (closed=%v,err=%v): %w", closed, t.err, err) + } + return n, err } func (t *tarFilterer) Close() error { @@ -108,9 +115,8 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk skip, replaceContents, replacementContents = filter(hdr) } if !skip { - err = tarWriter.WriteHeader(hdr) - if err != nil { - err = fmt.Errorf("filtering tar header for %q: %w", hdr.Name, err) + if err = tarWriter.WriteHeader(hdr); err != nil { + err = fmt.Errorf("writing tar header for %q: %w", hdr.Name, err) break } if hdr.Size != 0 { @@ -130,6 +136,10 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk break } } + if err = tarWriter.Flush(); err != nil { + err = fmt.Errorf("flushing tar item padding for %q: %w", hdr.Name, err) + break + } } hdr, err = tarReader.Next() } diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index f9630c938a..a27ec27707 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -10,6 +10,7 @@ import ( "io" "maps" "os" + "path" "path/filepath" "slices" "strings" @@ -46,15 +47,25 @@ const ( // manifest, suitable for specifying as a value of the // PreferredManifestType member of a CommitOptions structure. Dockerv2ImageManifest = define.Dockerv2ImageManifest + // containerExcludesDir is the subdirectory of the container data + // directory where we drop exclusions + containerExcludesDir = "commit-excludes" + // containerExcludesSubstring is the suffix of files under + // $cdir/containerExcludesDir which should be ignored, as they only + // exist because we use CreateTemp() to create uniquely-named files, + // but we don't want to try to use their contents until after they've + // been written to + containerExcludesSubstring = ".tmp" ) // ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to // control whether various information like the like setuid and setgid bits and // xattrs are preserved when extracting file system objects. type ExtractRootfsOptions struct { - StripSetuidBit bool // strip the setuid bit off of items being extracted. - StripSetgidBit bool // strip the setgid bit off of items being extracted. - StripXattrs bool // don't record extended attributes of items being extracted. + StripSetuidBit bool // strip the setuid bit off of items being extracted. + StripSetgidBit bool // strip the setgid bit off of items being extracted. + StripXattrs bool // don't record extended attributes of items being extracted. 
+ ForceTimestamp *time.Time // force timestamps in output content } type containerImageRef struct { @@ -80,6 +91,7 @@ type containerImageRef struct { confidentialWorkload ConfidentialWorkloadOptions omitHistory bool emptyLayer bool + omitLayerHistoryEntry bool idMappingOptions *define.IDMappingOptions parent string blobDirectory string @@ -91,6 +103,10 @@ type containerImageRef struct { overrideConfig *manifest.Schema2Config extraImageContent map[string]string compatSetParent types.OptionalBool + layerExclusions []copier.ConditionalRemovePath + unsetAnnotations []string + setAnnotations []string + createdAnnotation types.OptionalBool } type blobLayerInfo struct { @@ -226,13 +242,14 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo pipeReader, pipeWriter := io.Pipe() errChan := make(chan error, 1) go func() { + defer pipeWriter.Close() defer close(errChan) if len(i.extraImageContent) > 0 { // Abuse the tar format and _prepend_ the synthesized // data items to the archive we'll get from // copier.Get(), in a way that looks right to a reader // as long as we DON'T Close() the tar Writer. - filename, _, _, err := i.makeExtraImageContentDiff(false) + filename, _, _, err := i.makeExtraImageContentDiff(false, opts.ForceTimestamp) if err != nil { errChan <- fmt.Errorf("creating part of archive with extra content: %w", err) return @@ -257,10 +274,10 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo StripSetuidBit: opts.StripSetuidBit, StripSetgidBit: opts.StripSetgidBit, StripXattrs: opts.StripXattrs, + Timestamp: opts.ForceTimestamp, } err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err - pipeWriter.Close() }() return ioutils.NewReadCloserWrapper(pipeReader, func() error { if err = pipeReader.Close(); err != nil { @@ -306,12 +323,21 @@ func (i *containerImageRef) newDockerSchema2ManifestBuilder() (manifestBuilder, if err := json.Unmarshal(i.dconfig, &dimage); err != nil { return nil, err } + // Suppress the hostname and domainname if we're running with the + // equivalent of either --timestamp or --source-date-epoch. + if i.created != nil { + dimage.Config.Hostname = "sandbox" + dimage.Config.Domainname = "" + } // Set the parent, but only if we want to be compatible with "classic" docker build. if i.compatSetParent == types.OptionalBoolTrue { dimage.Parent = docker.ID(i.parent) } // Set the container ID and containerConfig in the docker format. dimage.Container = i.containerID + if i.created != nil { + dimage.Container = "" + } if dimage.Config != nil { dimage.ContainerConfig = *dimage.Config } @@ -440,14 +466,16 @@ func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff strin if mb.i.created != nil { created = (*mb.i.created).UTC() } - dnews := docker.V2S2History{ - Created: created, - CreatedBy: mb.i.createdBy, - Author: mb.dimage.Author, - EmptyLayer: mb.i.emptyLayer, - Comment: mb.i.historyComment, + if !mb.i.omitLayerHistoryEntry { + dnews := docker.V2S2History{ + Created: created, + CreatedBy: mb.i.createdBy, + Author: mb.dimage.Author, + EmptyLayer: mb.i.emptyLayer, + Comment: mb.i.historyComment, + } + mb.dimage.History = append(mb.dimage.History, dnews) } - mb.dimage.History = append(mb.dimage.History, dnews) // Add a history entry for the extra image content if we added a layer for it. 
// This diff was added to the list of layers before API-supplied layers that // needed to be appended, and we need to keep the order of history entries for @@ -467,16 +495,19 @@ func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff strin appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) } - // Assemble a comment indicating which base image was used, if it wasn't - // just an image ID, and add it to the first history entry we added. - var fromComment string - if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { - if mb.dimage.History[baseImageHistoryLen].Comment != "" { - fromComment = " " + // Assemble a comment indicating which base image was used, if it + // wasn't just an image ID, and add it to the first history entry we + // added, if we indeed added one. + if len(mb.dimage.History) > baseImageHistoryLen { + var fromComment string + if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { + if mb.dimage.History[baseImageHistoryLen].Comment != "" { + fromComment = " " + } + fromComment += "FROM " + mb.i.fromImageName } - fromComment += "FROM " + mb.i.fromImageName + mb.dimage.History[baseImageHistoryLen].Comment += fromComment } - mb.dimage.History[baseImageHistoryLen].Comment += fromComment // Confidence check that we didn't just create a mismatch between non-empty layers in the // history and the number of diffIDs. Only applicable if the base image (if there was @@ -562,6 +593,23 @@ func (i *containerImageRef) newOCIManifestBuilder() (manifestBuilder, error) { } // Return partial manifest. The Layers lists will be populated later. + annotations := make(map[string]string) + maps.Copy(annotations, i.annotations) + switch i.createdAnnotation { + case types.OptionalBoolFalse: + delete(annotations, v1.AnnotationCreated) + default: + fallthrough + case types.OptionalBoolTrue, types.OptionalBoolUndefined: + annotations[v1.AnnotationCreated] = created.UTC().Format(time.RFC3339Nano) + } + for _, k := range i.unsetAnnotations { + delete(annotations, k) + } + for _, kv := range i.setAnnotations { + k, v, _ := strings.Cut(kv, "=") + annotations[k] = v + } return &ociManifestBuilder{ i: i, // The default layer media type assumes no compression. @@ -576,7 +624,7 @@ func (i *containerImageRef) newOCIManifestBuilder() (manifestBuilder, error) { MediaType: v1.MediaTypeImageConfig, }, Layers: []v1.Descriptor{}, - Annotations: i.annotations, + Annotations: annotations, }, }, nil } @@ -610,9 +658,8 @@ func (mb *ociManifestBuilder) computeLayerMIMEType(what string, layerCompression // how to decompress them, we can't try to compress layers with xz. return errors.New("media type for xz-compressed layers is not defined") case archive.Zstd: - // Until the image specs define a media type for zstd-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with zstd. 
- return errors.New("media type for zstd-compressed layers is not defined") + omediaType = v1.MediaTypeImageLayerZstd + logrus.Debugf("compressing %s with zstd", what) default: logrus.Debugf("compressing %s with unknown compressor(?)", what) } @@ -656,14 +703,16 @@ func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraIm if mb.i.created != nil { created = (*mb.i.created).UTC() } - onews := v1.History{ - Created: &created, - CreatedBy: mb.i.createdBy, - Author: mb.oimage.Author, - EmptyLayer: mb.i.emptyLayer, - Comment: mb.i.historyComment, + if !mb.i.omitLayerHistoryEntry { + onews := v1.History{ + Created: &created, + CreatedBy: mb.i.createdBy, + Author: mb.oimage.Author, + EmptyLayer: mb.i.emptyLayer, + Comment: mb.i.historyComment, + } + mb.oimage.History = append(mb.oimage.History, onews) } - mb.oimage.History = append(mb.oimage.History, onews) // Add a history entry for the extra image content if we added a layer for it. // This diff was added to the list of layers before API-supplied layers that // needed to be appended, and we need to keep the order of history entries for @@ -683,16 +732,19 @@ func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraIm appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) } - // Assemble a comment indicating which base image was used, if it wasn't - // just an image ID, and add it to the first history entry we added. - var fromComment string - if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { - if mb.oimage.History[baseImageHistoryLen].Comment != "" { - fromComment = " " + // Assemble a comment indicating which base image was used, if it + // wasn't just an image ID, and add it to the first history entry we + // added, if we indeed added one. + if len(mb.oimage.History) > baseImageHistoryLen { + var fromComment string + if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { + if mb.oimage.History[baseImageHistoryLen].Comment != "" { + fromComment = " " + } + fromComment += "FROM " + mb.i.fromImageName } - fromComment += "FROM " + mb.i.fromImageName + mb.oimage.History[baseImageHistoryLen].Comment += fromComment } - mb.oimage.History[baseImageHistoryLen].Comment += fromComment // Confidence check that we didn't just create a mismatch between non-empty layers in the // history and the number of diffIDs. Only applicable if the base image (if there was @@ -849,7 +901,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon layerUncompressedSize = apiLayers[apiLayerIndex].size } else if layerID == synthesizedLayerID { // layer diff consisting of extra files to synthesize into a layer - diffFilename, digest, size, err := i.makeExtraImageContentDiff(true) + diffFilename, digest, size, err := i.makeExtraImageContentDiff(true, nil) if err != nil { return nil, fmt.Errorf("unable to generate layer for additional content: %w", err) } @@ -943,8 +995,12 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon counter := ioutils.NewWriteCounter(layerFile) var destHasher digest.Digester var multiWriter io.Writer - // Avoid rehashing when we do not compress. - if i.compression != archive.Uncompressed { + // Avoid rehashing when we compress or mess with the layer contents somehow. + // At this point, there are multiple ways that can happen. 
+ diffBeingAltered := i.compression != archive.Uncompressed + diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil + diffBeingAltered = diffBeingAltered || len(i.layerExclusions) != 0 + if diffBeingAltered { destHasher = digest.Canonical.Digester() multiWriter = io.MultiWriter(counter, destHasher.Hash()) } else { @@ -963,19 +1019,25 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon // Use specified timestamps in the layer, if we're doing that for history // entries. nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close) - writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime) + writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, i.layerExclusions) writer = writeCloser // Okay, copy from the raw diff through the filter, compressor, and counter and // digesters. size, err := io.Copy(writer, rc) + if err != nil { + writeCloser.Close() + layerFile.Close() + rc.Close() + return nil, fmt.Errorf("storing %s to file: on copy: %w", what, err) + } if err := writeCloser.Close(); err != nil { layerFile.Close() rc.Close() - return nil, fmt.Errorf("storing %s to file: %w on pipe close", what, err) + return nil, fmt.Errorf("storing %s to file: on pipe close: %w", what, err) } if err := layerFile.Close(); err != nil { rc.Close() - return nil, fmt.Errorf("storing %s to file: %w on file close", what, err) + return nil, fmt.Errorf("storing %s to file: on file close: %w", what, err) } rc.Close() @@ -989,12 +1051,12 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon if err != nil { return nil, fmt.Errorf("storing %s to file: %w", what, err) } - if i.compression == archive.Uncompressed { + if diffBeingAltered { + size = counter.Count + } else { if size != counter.Count { return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) } - } else { - size = counter.Count } logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String()) // Rename the layer so that we can more easily find it by digest later. @@ -1152,7 +1214,7 @@ func (i *containerImageSource) GetBlob(_ context.Context, blob types.BlobInfo, _ // makeExtraImageContentDiff creates an archive file containing the contents of // files named in i.extraImageContent. The footer that marks the end of the // archive may be omitted. 
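The diffBeingAltered flag above decides whether the written copy needs its own digest: if the bytes pass through unchanged, the source digest stays authoritative, but compression, timestamp rewriting, or entry exclusions all change the byte stream, so a second digester is attached with io.MultiWriter. A stripped-down sketch of that wiring, using crypto/sha256 in place of the go-digest package and hypothetical names:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

// copyLayer copies a layer diff to dst. When the stream is being altered
// (compressed, timestamps rewritten, entries excluded), it hashes the altered
// output as well; otherwise the source hash is reused for the destination.
func copyLayer(dst io.Writer, src io.Reader, altered bool) (srcSum, dstSum string, err error) {
	srcHash := sha256.New()
	src = io.TeeReader(src, srcHash)
	dstHash := sha256.New()
	out := dst
	if altered {
		out = io.MultiWriter(dst, dstHash)
	}
	if _, err := io.Copy(out, src); err != nil {
		return "", "", err
	}
	srcSum = fmt.Sprintf("%x", srcHash.Sum(nil))
	if altered {
		dstSum = fmt.Sprintf("%x", dstHash.Sum(nil))
	} else {
		dstSum = srcSum // unchanged bytes: reuse the source digest
	}
	return srcSum, dstSum, nil
}

func main() {
	s, d, _ := copyLayer(os.Stdout, strings.NewReader("layer bytes\n"), false)
	fmt.Println(s == d)
}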
-func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ string, _ digest.Digest, _ int64, retErr error) { +func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool, timestamp *time.Time) (_ string, _ digest.Digest, _ int64, retErr error) { cdir, err := i.store.ContainerDirectory(i.containerID) if err != nil { return "", "", -1, err @@ -1170,9 +1232,12 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str digester := digest.Canonical.Digester() counter := ioutils.NewWriteCounter(digester.Hash()) tw := tar.NewWriter(io.MultiWriter(diff, counter)) - created := time.Now() - if i.created != nil { - created = *i.created + if timestamp == nil { + now := time.Now() + timestamp = &now + if i.created != nil { + timestamp = i.created + } } for path, contents := range i.extraImageContent { if err := func() error { @@ -1189,26 +1254,31 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str Name: path, Typeflag: tar.TypeReg, Mode: 0o644, - ModTime: created, + ModTime: *timestamp, Size: st.Size(), }); err != nil { - return err + return fmt.Errorf("writing header for %q: %w", path, err) } if _, err := io.Copy(tw, content); err != nil { return fmt.Errorf("writing content for %q: %w", path, err) } if err := tw.Flush(); err != nil { - return err + return fmt.Errorf("flushing content for %q: %w", path, err) } return nil }(); err != nil { - return "", "", -1, err + return "", "", -1, fmt.Errorf("writing %q to prepend-to-archive blob: %w", contents, err) } } - if !includeFooter { - return diff.Name(), "", -1, nil + if includeFooter { + if err = tw.Close(); err != nil { + return "", "", -1, fmt.Errorf("closingprepend-to-archive blob after final write: %w", err) + } + } else { + if err = tw.Flush(); err != nil { + return "", "", -1, fmt.Errorf("flushing prepend-to-archive blob after final write: %w", err) + } } - tw.Close() return diff.Name(), digester.Digest(), counter.Count, nil } @@ -1219,10 +1289,18 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str // no later than layerLatestModTime (if a value is provided for it). // This implies that if both values are provided, the archive's timestamps will // be set to the earlier of the two values. -func makeFilteredLayerWriteCloser(wc io.WriteCloser, layerModTime, layerLatestModTime *time.Time) io.WriteCloser { - if layerModTime == nil && layerLatestModTime == nil { +func makeFilteredLayerWriteCloser(wc io.WriteCloser, layerModTime, layerLatestModTime *time.Time, exclusions []copier.ConditionalRemovePath) io.WriteCloser { + if layerModTime == nil && layerLatestModTime == nil && len(exclusions) == 0 { return wc } + exclusionsMap := make(map[string]copier.ConditionalRemovePath) + for _, exclusionSpec := range exclusions { + pathSpec := strings.Trim(path.Clean(exclusionSpec.Path), "/") + if pathSpec == "" { + continue + } + exclusionsMap[pathSpec] = exclusionSpec + } wc = newTarFilterer(wc, func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) { // Changing a zeroed field to a non-zero field can affect the // format that the library uses for writing the header, so only @@ -1230,6 +1308,14 @@ func makeFilteredLayerWriteCloser(wc io.WriteCloser, layerModTime, layerLatestMo // format (and as a result, changing the length) of the header // that we write. 
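makeExtraImageContentDiff now accepts an explicit timestamp so the synthesized diff is byte-for-byte reproducible, falling back to i.created or the current time when none is supplied. The following standalone sketch shows the core of that: writing one regular file into a tar stream with a pinned ModTime; the file name and epoch value are only examples.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

// writePinnedEntry adds a single regular file to tw with a fixed modification
// time, which is what keeps the generated diff reproducible across runs.
func writePinnedEntry(tw *tar.Writer, name string, contents []byte, ts time.Time) error {
	if err := tw.WriteHeader(&tar.Header{
		Name:     name,
		Typeflag: tar.TypeReg,
		Mode:     0o644,
		ModTime:  ts,
		Size:     int64(len(contents)),
	}); err != nil {
		return fmt.Errorf("writing header for %q: %w", name, err)
	}
	if _, err := tw.Write(contents); err != nil {
		return fmt.Errorf("writing content for %q: %w", name, err)
	}
	return tw.Flush()
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	epoch := time.Unix(0, 0).UTC() // e.g. SOURCE_DATE_EPOCH=0
	if err := writePinnedEntry(tw, "run/.containerenv", []byte("engine=\"buildah\"\n"), epoch); err != nil {
		panic(err)
	}
	tw.Close()
	fmt.Println(buf.Len(), "bytes of archive")
}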
modTime := hdr.ModTime + nameSpec := strings.Trim(path.Clean(hdr.Name), "/") + if conditions, ok := exclusionsMap[nameSpec]; ok { + if (conditions.ModTime == nil || conditions.ModTime.Equal(modTime)) && + (conditions.Owner == nil || (conditions.Owner.UID == hdr.Uid && conditions.Owner.GID == hdr.Gid)) && + (conditions.Mode == nil || (*conditions.Mode&os.ModePerm == os.FileMode(hdr.Mode)&os.ModePerm)) { + return true, false, nil + } + } if layerModTime != nil { modTime = *layerModTime } @@ -1305,7 +1391,7 @@ func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string, l digester := digest.Canonical.Digester() sizeCountedFile := ioutils.NewWriteCounter(io.MultiWriter(digester.Hash(), f)) - wc := makeFilteredLayerWriteCloser(ioutils.NopWriteCloser(sizeCountedFile), layerModTime, layerLatestModTime) + wc := makeFilteredLayerWriteCloser(ioutils.NopWriteCloser(sizeCountedFile), layerModTime, layerLatestModTime, nil) _, copyErr := io.Copy(wc, rc) wcErr := wc.Close() if err := rc.Close(); err != nil { @@ -1348,6 +1434,35 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR name = parsed } } + + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, fmt.Errorf("getting the per-container data directory for %q: %w", b.ContainerID, err) + } + + excludesFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*")) + if err != nil { + return nil, fmt.Errorf("checking for commit exclusions for %q: %w", b.ContainerID, err) + } + var layerExclusions []copier.ConditionalRemovePath + for _, excludesFile := range excludesFiles { + if strings.Contains(excludesFile, containerExcludesSubstring) { + continue + } + excludesData, err := os.ReadFile(excludesFile) + if err != nil { + return nil, fmt.Errorf("reading commit exclusions for %q: %w", b.ContainerID, err) + } + var excludes []copier.ConditionalRemovePath + if err := json.Unmarshal(excludesData, &excludes); err != nil { + return nil, fmt.Errorf("parsing commit exclusions for %q: %w", b.ContainerID, err) + } + layerExclusions = append(layerExclusions, excludes...) + } + if options.CompatLayerOmissions == types.OptionalBoolTrue { + layerExclusions = append(layerExclusions, compatLayerExclusions...) 
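The exclusion filter above only drops a tar entry when every condition attached to it matches: the cleaned path and, when present, the modification time, owner, and permission bits. Here is a reduced sketch of that predicate; removalCondition is a local stand-in for copier.ConditionalRemovePath (with the owner split into separate UID/GID pointers for brevity), not the real type.

package main

import (
	"archive/tar"
	"fmt"
	"os"
	"path"
	"strings"
	"time"
)

// removalCondition is a stand-in for copier.ConditionalRemovePath.
type removalCondition struct {
	Path     string
	ModTime  *time.Time
	UID, GID *int
	Mode     *os.FileMode
}

// shouldSkip reports whether hdr matches a condition keyed by its cleaned path.
func shouldSkip(hdr *tar.Header, exclusions map[string]removalCondition) bool {
	name := strings.Trim(path.Clean(hdr.Name), "/")
	c, ok := exclusions[name]
	if !ok {
		return false
	}
	if c.ModTime != nil && !c.ModTime.Equal(hdr.ModTime) {
		return false
	}
	if c.UID != nil && *c.UID != hdr.Uid {
		return false
	}
	if c.GID != nil && *c.GID != hdr.Gid {
		return false
	}
	if c.Mode != nil && *c.Mode&os.ModePerm != os.FileMode(hdr.Mode)&os.ModePerm {
		return false
	}
	return true
}

func main() {
	root := 0
	exclusions := map[string]removalCondition{
		"proc": {Path: "proc", UID: &root, GID: &root},
	}
	hdr := &tar.Header{Name: "./proc/", Typeflag: tar.TypeDir, Uid: 0, Gid: 0, Mode: 0o755}
	fmt.Println(shouldSkip(hdr, exclusions)) // true: owned by root:root, so it is dropped
}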
+ } + manifestType := options.PreferredManifestType if manifestType == "" { manifestType = define.OCIv1ImageManifest @@ -1430,11 +1545,14 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR layerLatestModTime: layerLatestModTime, historyComment: b.HistoryComment(), annotations: b.Annotations(), + setAnnotations: slices.Clone(options.Annotations), + unsetAnnotations: slices.Clone(options.UnsetAnnotations), preferredManifestType: manifestType, squash: options.Squash, confidentialWorkload: options.ConfidentialWorkloadOptions, omitHistory: options.OmitHistory || forceOmitHistory, - emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, + emptyLayer: (options.EmptyLayer || options.OmitLayerHistoryEntry) && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, + omitLayerHistoryEntry: options.OmitLayerHistoryEntry && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, idMappingOptions: &b.IDMappingOptions, parent: parent, blobDirectory: options.BlobDirectory, @@ -1446,6 +1564,8 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR overrideConfig: options.OverrideConfig, extraImageContent: maps.Clone(options.ExtraImageContent), compatSetParent: options.CompatSetParent, + layerExclusions: layerExclusions, + createdAnnotation: options.CreatedAnnotation, } if ref.created != nil { for i := range ref.preEmptyLayers { diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 2bffdc3c60..eb3c5e2409 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -17,10 +17,12 @@ import ( "strconv" "strings" "sync" + "time" "github.com/containerd/platforms" "github.com/containers/buildah" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/util" @@ -218,6 +220,15 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } } + if sourceDateEpoch, ok := options.Args[internal.SourceDateEpochName]; ok && options.SourceDateEpoch == nil { + sde, err := strconv.ParseInt(sourceDateEpoch, 10, 64) + if err != nil { + return "", nil, fmt.Errorf("parsing SOURCE_DATE_EPOCH build-arg %q: %w", sourceDateEpoch, err) + } + sdeTime := time.Unix(sde, 0) + options.SourceDateEpoch = &sdeTime + } + systemContext := options.SystemContext for _, platform := range options.Platforms { platformContext := *systemContext @@ -259,6 +270,16 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } // Deep copy args to prevent concurrent read/writes over Args. 
platformOptions.Args = maps.Clone(options.Args) + + if options.SourceDateEpoch != nil { + if options.Timestamp != nil { + return "", nil, errors.New("timestamp and source-date-epoch would be ambiguous if allowed together") + } + if _, alreadySet := platformOptions.Args[internal.SourceDateEpochName]; !alreadySet { + platformOptions.Args[internal.SourceDateEpochName] = fmt.Sprintf("%d", options.SourceDateEpoch.Unix()) + } + } + builds.Go(func() error { loggerPerPlatform := logger if platformOptions.LogFile != "" && platformOptions.LogSplitByPlatform { diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 9f0f62b82b..63089ff7b8 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -14,6 +14,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/sshagent" @@ -43,18 +44,19 @@ import ( // instruction in the Dockerfile, since that's usually an indication of a user // error, but for these values we make exceptions and ignore them. var builtinAllowedBuildArgs = map[string]struct{}{ - "HTTP_PROXY": {}, - "http_proxy": {}, - "HTTPS_PROXY": {}, - "https_proxy": {}, - "FTP_PROXY": {}, - "ftp_proxy": {}, - "NO_PROXY": {}, - "no_proxy": {}, - "TARGETARCH": {}, - "TARGETOS": {}, - "TARGETPLATFORM": {}, - "TARGETVARIANT": {}, + "HTTP_PROXY": {}, + "http_proxy": {}, + "HTTPS_PROXY": {}, + "https_proxy": {}, + "FTP_PROXY": {}, + "ftp_proxy": {}, + "NO_PROXY": {}, + "no_proxy": {}, + "TARGETARCH": {}, + "TARGETOS": {}, + "TARGETPLATFORM": {}, + "TARGETVARIANT": {}, + internal.SourceDateEpochName: {}, } // Executor is a buildah-based implementation of the imagebuilder.Executor @@ -83,6 +85,7 @@ type Executor struct { log func(format string, args ...any) // can be nil in io.Reader inheritLabels types.OptionalBool + inheritAnnotations types.OptionalBool out io.Writer err io.Writer signaturePolicyPath string @@ -151,6 +154,7 @@ type Executor struct { logPrefix string unsetEnvs []string unsetLabels []string + unsetAnnotations []string processLabel string // Shares processLabel of first stage container with containers of other stages in same build mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build buildOutputs []string // Specifies instructions for any custom build output @@ -163,7 +167,11 @@ type Executor struct { compatSetParent types.OptionalBool compatVolumes types.OptionalBool compatScratchConfig types.OptionalBool + compatLayerOmissions types.OptionalBool noPivotRoot bool + sourceDateEpoch *time.Time + rewriteTimestamp bool + createdAnnotation types.OptionalBool } type imageTypeAndHistoryAndDiffIDs struct { @@ -171,6 +179,8 @@ type imageTypeAndHistoryAndDiffIDs struct { history []v1.History diffIDs []digest.Digest err error + architecture string + os string } // newExecutor creates a new instance of the imagebuilder.Executor interface. 
@@ -268,6 +278,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o reportWriter: writer, isolation: options.Isolation, inheritLabels: options.InheritLabels, + inheritAnnotations: options.InheritAnnotations, namespaceOptions: options.NamespaceOptions, configureNetwork: options.ConfigureNetwork, cniPluginPath: options.CNIPluginPath, @@ -319,6 +330,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o logPrefix: logPrefix, unsetEnvs: slices.Clone(options.UnsetEnvs), unsetLabels: slices.Clone(options.UnsetLabels), + unsetAnnotations: slices.Clone(options.UnsetAnnotations), buildOutputs: buildOutputs, osVersion: options.OSVersion, osFeatures: slices.Clone(options.OSFeatures), @@ -329,8 +341,17 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o compatSetParent: options.CompatSetParent, compatVolumes: options.CompatVolumes, compatScratchConfig: options.CompatScratchConfig, + compatLayerOmissions: options.CompatLayerOmissions, noPivotRoot: options.NoPivotRoot, + sourceDateEpoch: options.SourceDateEpoch, + rewriteTimestamp: options.RewriteTimestamp, + createdAnnotation: options.CreatedAnnotation, } + // sort unsetAnnotations because we will later write these + // values to the history of the image therefore we want to + // make sure that order is always consistent. + slices.Sort(exec.unsetAnnotations) + if exec.err == nil { exec.err = os.Stderr } @@ -457,30 +478,30 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu } } -// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID. -func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) { +// getImageTypeAndHistoryAndDiffIDs returns the os, architecture, manifest type, history, and diff IDs list of imageID. 
+func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, string, string, []v1.History, []digest.Digest, error) { b.imageInfoLock.Lock() imageInfo, ok := b.imageInfoCache[imageID] b.imageInfoLock.Unlock() if ok { - return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err + return imageInfo.os, imageInfo.architecture, imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err } imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, "@"+imageID) if err != nil { - return "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err) + return "", "", "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err) } ref, err := imageRef.NewImage(ctx, nil) if err != nil { - return "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err) + return "", "", "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err) } defer ref.Close() oci, err := ref.OCIConfig(ctx) if err != nil { - return "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err) + return "", "", "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err) } manifestBytes, manifestFormat, err := ref.Manifest(ctx) if err != nil { - return "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err) + return "", "", "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err) } if manifestFormat == "" && len(manifestBytes) > 0 { manifestFormat = manifest.GuessMIMEType(manifestBytes) @@ -491,9 +512,11 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID history: oci.History, diffIDs: oci.RootFS.DiffIDs, err: nil, + architecture: oci.Architecture, + os: oci.OS, } b.imageInfoLock.Unlock() - return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil + return oci.OS, oci.Architecture, manifestFormat, oci.History, oci.RootFS.DiffIDs, nil } func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) { diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 51c0ce0a25..fcab73c778 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -76,6 +76,8 @@ type StageExecutor struct { stage *imagebuilder.Stage didExecute bool argsFromContainerfile []string + hasLink bool + isLastStep bool } // Preserve informs the stage executor that from this point on, it needs to @@ -359,8 +361,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err } return errors.New("COPY --keep-git-dir is not supported") } - if cp.Link { - return errors.New("COPY --link is not supported") + if cp.Link && s.executor.layers { + s.hasLink = true + } else if cp.Link { + s.executor.logger.Warn("--link is not supported when building without --layers, ignoring --link") + s.hasLink = false } if len(cp.Excludes) > 0 { excludes = append(slices.Clone(excludes), cp.Excludes...) 
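getImageTypeAndHistoryAndDiffIDs caches its (now larger) result per image ID behind a mutex so repeated cache probing during a build does not reload the same image config. The sketch below shows just that memoization pattern with placeholder types and a placeholder loader; like the original, only the map accesses are guarded, so two concurrent misses may both run the loader.

package main

import (
	"fmt"
	"sync"
)

// imageInfo stands in for the cached manifest type, history, diff IDs, OS and architecture.
type imageInfo struct {
	manifestType string
	os, arch     string
}

type infoCache struct {
	mu    sync.Mutex
	cache map[string]imageInfo
	load  func(imageID string) (imageInfo, error) // the expensive lookup
}

// get returns the cached metadata for imageID, loading and storing it on a miss.
func (c *infoCache) get(imageID string) (imageInfo, error) {
	c.mu.Lock()
	info, ok := c.cache[imageID]
	c.mu.Unlock()
	if ok {
		return info, nil
	}
	info, err := c.load(imageID)
	if err != nil {
		return imageInfo{}, err
	}
	c.mu.Lock()
	c.cache[imageID] = info
	c.mu.Unlock()
	return info, nil
}

func main() {
	calls := 0
	c := &infoCache{
		cache: map[string]imageInfo{},
		load: func(string) (imageInfo, error) {
			calls++
			return imageInfo{manifestType: "application/vnd.oci.image.manifest.v1+json", os: "linux", arch: "amd64"}, nil
		},
	}
	c.get("sha256:abc")
	c.get("sha256:abc")
	fmt.Println("loader ran", calls, "time(s)") // 1
}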
@@ -564,6 +569,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co sources = append(sources, src) } } + labelsAndAnnotations := s.buildMetadata(s.isLastStep, true) options := buildah.AddAndCopyOptions{ Chmod: copy.Chmod, Chown: copy.Chown, @@ -583,6 +589,8 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co MaxRetries: s.executor.maxPullPushRetries, RetryDelay: s.executor.retryPullPushDelay, Parents: copy.Parents, + Link: s.hasLink, + BuildMetadata: labelsAndAnnotations, } if len(copy.Files) > 0 { // If we are copying heredoc files, we need to temporary place @@ -1034,9 +1042,14 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo for _, p := range builder.Ports() { ports[docker.Port(p)] = struct{}{} } + hostname, domainname := builder.Hostname(), builder.Domainname() + containerName := builder.Container + if s.executor.timestamp != nil || s.executor.sourceDateEpoch != nil { + hostname, domainname, containerName = "sandbox", "", "" + } dConfig := docker.Config{ - Hostname: builder.Hostname(), - Domainname: builder.Domainname(), + Hostname: hostname, + Domainname: domainname, User: builder.User(), Env: builder.Env(), Cmd: builder.Cmd(), @@ -1063,7 +1076,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo dImage := docker.Image{ Parent: builder.FromImageID, ContainerConfig: dConfig, - Container: builder.Container, + Container: containerName, Author: builder.Maintainer(), Architecture: builder.Architecture(), RootFS: rootfs, @@ -1284,7 +1297,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if len(children) == 0 { // There are no steps. - if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 { + if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 || len(s.executor.unsetAnnotations) > 0 { // We either don't have a base image, or we need to // transform the contents of the base image, or we need // to make some changes to just the config blob. Whichever @@ -1293,7 +1306,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // No base image means there's nothing to put in a // layer, so don't create one. emptyLayer := (s.builder.FromImageID == "") - createdBy, err := s.getCreatedBy(nil, "") + createdBy, err := s.getCreatedBy(nil, "", lastStage) if err != nil { return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1325,6 +1338,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, logRusage() moreInstructions := i < len(children)-1 lastInstruction := !moreInstructions + + s.isLastStep = lastStage && lastInstruction // Resolve any arguments in this instruction. 
step := ib.Step() if err := step.Resolve(node); err != nil { @@ -1444,7 +1459,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if s.executor.timestamp != nil { timestamp = *s.executor.timestamp } - createdBy, err := s.getCreatedBy(node, addedContentSummary) + createdBy, err := s.getCreatedBy(node, addedContentSummary, false) if err != nil { return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1458,7 +1473,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // stage. if lastStage || imageIsUsedLater { logCommit(s.output, i) - createdBy, err := s.getCreatedBy(node, addedContentSummary) + createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction) if err != nil { return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1533,7 +1548,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // cacheKey since it will be used either while pulling or pushing the // cache images. if needsCacheKey { - cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } @@ -1561,13 +1576,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, addedContentSummary = s.getContentSummaryAfterAddingContent() // regenerate cache key with updated content summary if needsCacheKey { - cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } } } - cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } @@ -1579,7 +1594,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // is ignored and will be automatically logged for --log-level debug if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil { logCachePulled(cacheKey, ref) - cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } @@ -1611,7 +1626,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, addedContentSummary = s.getContentSummaryAfterAddingContent() // regenerate cache key with updated content summary if needsCacheKey { - cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err) } @@ -1620,7 +1635,7 @@ 
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Check if there's already an image based on our parent that // has the same change that we just made. if checkForLayers && !avoidLookingCache { - cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } @@ -1633,7 +1648,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // is ignored and will be automatically logged for --log-level debug if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil { logCachePulled(cacheKey, ref) - cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage) if err != nil { return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err) } @@ -1683,7 +1698,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // We're not going to find any more cache hits, so we // can stop looking for them. checkForLayers = false - createdBy, err := s.getCreatedBy(node, addedContentSummary) + createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction) if err != nil { return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1725,7 +1740,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if lastInstruction && lastStage { if s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.sbomScanOptions) != 0 { - createdBy, err := s.getCreatedBy(node, addedContentSummary) + createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction) if err != nil { return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1790,6 +1805,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, return "", nil, false, fmt.Errorf("preparing container for next step: %w", err) } } + + s.hasLink = false } return imgID, ref, onlyBaseImage, nil @@ -1826,7 +1843,7 @@ func historyEntriesEqual(base, derived v1.History) bool { // that we're comparing. // Used to verify whether a cache of the intermediate image exists and whether // to run the build again. 
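Threading "last instruction of the last stage" into generateCacheKey matters because the createdBy string now carries annotation-related metadata only on that final step; since the cache key is a hash over the base image's diff IDs plus that string, final-step-only changes invalidate only the final layer's cache. A deliberately simplified sketch of that shape (the real code hashes a larger field set; this is not buildah's exact key):

package main

import (
	"crypto/sha256"
	"fmt"
)

// cacheKey hashes the base image's diff IDs together with the createdBy
// string for the current instruction; any change to either busts the cache.
func cacheKey(baseDiffIDs []string, createdBy string) string {
	h := sha256.New()
	for _, id := range baseDiffIDs {
		fmt.Fprintln(h, id)
	}
	fmt.Fprintf(h, "%d:%s\n", len(createdBy), createdBy)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	diffIDs := []string{"sha256:1111", "sha256:2222"}
	everyStep := cacheKey(diffIDs, `/bin/sh -c #(nop) LABEL a=b`)
	lastStep := cacheKey(diffIDs, `/bin/sh -c #(nop) LABEL a=b|inheritAnnotations=false`)
	fmt.Println(everyStep != lastStep) // true: final-step metadata changes the key
}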
-func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) (bool, error) { +func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool, lastInstruction bool) (bool, error) { // our history should be as long as the base's, plus one entry for what // we're doing if len(history) != len(baseHistory)+1 { @@ -1865,7 +1882,7 @@ func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDif return false, nil } } - createdBy, err := s.getCreatedBy(child, addedContentSummary) + createdBy, err := s.getCreatedBy(child, addedContentSummary, lastInstruction) if err != nil { return false, fmt.Errorf("unable to get createdBy for the node: %w", err) } @@ -1875,16 +1892,21 @@ // getCreatedBy returns the command the image at node will be created by. If // the passed-in CompositeDigester is not nil, it is assumed to have the digest // information for the content if the node is ADD or COPY. -func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) (string, error) { +// +// This function behaves differently when it is invoked for the last step of a +// build: certain operations, such as removing annotations, only make sense on +// the last step of a build, not on every step. +func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string, isLastStep bool) (string, error) { if node == nil { return "/bin/sh", nil } - inheritLabels := "" - // If --inherit-label was manually set to false then update history. - if s.executor.inheritLabels == types.OptionalBoolFalse { - inheritLabels = "|inheritLabels=false" - } - switch strings.ToUpper(node.Value) { + + command := strings.ToUpper(node.Value) + addcopy := command == "ADD" || command == "COPY" + + labelsAndAnnotations := s.buildMetadata(isLastStep, addcopy) + + switch command { case "ARG": for _, variable := range strings.Fields(node.Original) { if variable != "ARG" { @@ -1892,7 +1914,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri } } buildArgs := s.getBuildArgsKey() - return "/bin/sh -c #(nop) ARG " + buildArgs + inheritLabels, nil + return "/bin/sh -c #(nop) ARG " + buildArgs + labelsAndAnnotations, nil case "RUN": shArg := "" buildArgs := s.getBuildArgsResolvedForRun() @@ -1918,7 +1940,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri mountOptionSource = "." } } - // Source specificed is part of stage, image or additional-build-context. + // Source specified is part of stage, image or additional-build-context.
if mountOptionFrom != "" { // If this is not a stage then get digest of image or additional build context if _, ok := s.executor.stages[mountOptionFrom]; !ok { @@ -1972,16 +1994,20 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri if buildArgs != "" { result = result + "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " " } - result = result + "/bin/sh -c " + shArg + heredoc + appendCheckSum + inheritLabels + result = result + "/bin/sh -c " + shArg + heredoc + appendCheckSum + labelsAndAnnotations return result, nil case "ADD", "COPY": destination := node for destination.Next != nil { destination = destination.Next } - return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " " + inheritLabels, nil + hasLink := "" + if s.hasLink { + hasLink = " --link" + } + return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + hasLink + " " + addedContentSummary + " in " + destination.Value + " " + labelsAndAnnotations, nil default: - return "/bin/sh -c #(nop) " + node.Original + inheritLabels, nil + return "/bin/sh -c #(nop) " + node.Original + labelsAndAnnotations, nil } } @@ -2115,14 +2141,14 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st // generated CacheKey is further used by buildah to lock and decide // tag for the intermediate image which can be pushed and pulled to/from // the remote repository. -func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) { +func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool, lastInstruction bool) (string, error) { hash := sha256.New() var baseHistory []v1.History var diffIDs []digest.Digest var manifestType string var err error if s.builder.FromImageID != "" { - manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) + _, _, manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) if err != nil { return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err) } @@ -2130,7 +2156,7 @@ func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.N fmt.Fprintln(hash, diffIDs[i].String()) } } - createdBy, err := s.getCreatedBy(currNode, addedContentDigest) + createdBy, err := s.getCreatedBy(currNode, addedContentDigest, lastInstruction) if err != nil { return "", err } @@ -2247,8 +2273,8 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen // intermediateImageExists returns image ID if an intermediate image of currNode exists in the image store from a previous build. // It verifies this by checking the parent of the top layer of the image and the history. -// If more than one image matches as potiential candidates then priority is given to the most recently built image. -func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) { +// If more than one image matches as potential candidates then priority is given to the most recently built image. 
+func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool, lastInstruction bool) (string, error) { cacheCandidates := []storage.Image{} // Get the list of images available in the image store images, err := s.executor.store.Images() @@ -2258,7 +2284,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p var baseHistory []v1.History var baseDiffIDs []digest.Digest if s.builder.FromImageID != "" { - _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) + _, _, _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) if err != nil { return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err) } @@ -2299,9 +2325,10 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p if s.builder.TopLayer != imageParentLayerID { continue } + // Next we double check that the history of this image is equivalent to the previous // lines in the Dockerfile up till the point we are at in the build. - manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID) + imageOS, imageArchitecture, manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID) if err != nil { // It's possible that this image is for another architecture, which results // in a custom-crafted error message that we'd have to use substring matching @@ -2314,8 +2341,27 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p if manifestType != s.executor.outputFormat { continue } + + // Compare the cached image's platform with the current build's target platform + currentArch := s.executor.architecture + currentOS := s.executor.os + if currentArch == "" && currentOS == "" { + currentOS, currentArch, _, err = parse.Platform(s.stage.Builder.Platform) + if err != nil { + logrus.Debugf("unable to parse default OS and Arch for the current build: %v", err) + } + } + if currentArch != "" && imageArchitecture != currentArch { + logrus.Debugf("cached image %q has architecture %q but current build targets %q, ignoring it", image.ID, imageArchitecture, currentArch) + continue + } + if currentOS != "" && imageOS != currentOS { + logrus.Debugf("cached image %q has OS %q but current build targets %q, ignoring it", image.ID, imageOS, currentOS) + continue + } + // children + currNode is the point of the Dockerfile we are currently at. 
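The new platform check above rejects cached intermediate images whose OS or architecture does not match the current build target, falling back to the stage's default platform string when the executor has no explicit values. A compact sketch of that filter; note the platform parsing here is simplified to "os/arch" splitting, whereas parse.Platform also understands variants.

package main

import (
	"fmt"
	"strings"
)

// platformMatches reports whether a cached image's os/arch is usable for the
// current target; empty target fields are treated as "don't care".
func platformMatches(imageOS, imageArch, targetOS, targetArch, defaultPlatform string) bool {
	if targetOS == "" && targetArch == "" {
		// Fall back to the stage's default platform, e.g. "linux/arm64".
		if parts := strings.SplitN(defaultPlatform, "/", 3); len(parts) >= 2 {
			targetOS, targetArch = parts[0], parts[1]
		}
	}
	if targetArch != "" && imageArch != targetArch {
		return false
	}
	if targetOS != "" && imageOS != targetOS {
		return false
	}
	return true
}

func main() {
	fmt.Println(platformMatches("linux", "amd64", "", "", "linux/arm64")) // false: wrong architecture
	fmt.Println(platformMatches("linux", "arm64", "", "", "linux/arm64")) // true
}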
- foundMatch, err := s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) + foundMatch, err := s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer, lastInstruction) if err != nil { return "", err } @@ -2421,15 +2467,33 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer for k, v := range config.Labels { s.builder.SetLabel(k, v) } - if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue { + switch s.executor.commonBuildOptions.IdentityLabel { + case types.OptionalBoolTrue: s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + case types.OptionalBoolFalse: + // nothing - don't clear it if there's a value set in the base image + default: + if s.executor.timestamp == nil && s.executor.sourceDateEpoch == nil { + s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + } } for _, key := range s.executor.unsetLabels { s.builder.UnsetLabel(key) } - for _, annotationSpec := range s.executor.annotations { - annotationk, annotationv, _ := strings.Cut(annotationSpec, "=") - s.builder.SetAnnotation(annotationk, annotationv) + if finalInstruction { + if s.executor.inheritAnnotations == types.OptionalBoolFalse { + // If user has selected `--inherit-annotations=false` let's not + // inherit annotations from base image. + s.builder.ClearAnnotations() + } + // Add new annotations to the last step. + for _, annotationSpec := range s.executor.annotations { + annotationk, annotationv, _ := strings.Cut(annotationSpec, "=") + s.builder.SetAnnotation(annotationk, annotationv) + } + for _, key := range s.executor.unsetAnnotations { + s.builder.UnsetAnnotation(key) + } } if imageRef != nil { logName := transports.ImageName(imageRef) @@ -2450,6 +2514,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer Squash: squash, OmitHistory: s.executor.commonBuildOptions.OmitHistory, EmptyLayer: emptyLayer, + OmitLayerHistoryEntry: s.hasLink, BlobDirectory: s.executor.blobDirectory, SignBy: s.executor.signBy, MaxRetries: s.executor.maxPullPushRetries, @@ -2457,6 +2522,12 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, CompatSetParent: s.executor.compatSetParent, + SourceDateEpoch: s.executor.sourceDateEpoch, + RewriteTimestamp: s.executor.rewriteTimestamp, + CompatLayerOmissions: s.executor.compatLayerOmissions, + UnsetAnnotations: s.executor.unsetAnnotations, + Annotations: s.executor.annotations, + CreatedAnnotation: s.executor.createdAnnotation, } if finalInstruction { options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload @@ -2478,7 +2549,13 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOption) error { - extractRootfsOpts := buildah.ExtractRootfsOptions{} + forceTimestamp := s.executor.timestamp + if s.executor.sourceDateEpoch != nil { + forceTimestamp = s.executor.sourceDateEpoch + } + extractRootfsOpts := buildah.ExtractRootfsOptions{ + ForceTimestamp: forceTimestamp, + } if unshare.IsRootless() { // In order to maintain as much parity as possible // with buildkit's version of --output and to avoid @@ -2492,7 +2569,12 @@ func (s *StageExecutor) 
generateBuildOutput(buildOutputOpts define.BuildOutputOp extractRootfsOpts.StripSetgidBit = true extractRootfsOpts.StripXattrs = true } - rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{}, extractRootfsOpts) + rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{ + HistoryTimestamp: s.executor.timestamp, + SourceDateEpoch: s.executor.sourceDateEpoch, + RewriteTimestamp: s.executor.rewriteTimestamp, + CompatLayerOmissions: s.executor.compatLayerOmissions, + }, extractRootfsOpts) if err != nil { return fmt.Errorf("failed to extract rootfs from given container image: %w", err) } @@ -2519,3 +2601,34 @@ func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMo logrus.Debugf("EnsureContainerPath %q (owner %q, mode %o) in %q", path, user, mode, s.builder.ContainerID) return s.builder.EnsureContainerPathAs(path, user, mode) } + +func (s *StageExecutor) buildMetadata(isLastStep bool, addcopy bool) string { + inheritLabels := "" + unsetAnnotations := "" + inheritAnnotations := "" + newAnnotations := "" + // If --inherit-label was manually set to false then update history. + if s.executor.inheritLabels == types.OptionalBoolFalse { + inheritLabels = "|inheritLabels=false" + } + if isLastStep { + for _, annotation := range s.executor.unsetAnnotations { + unsetAnnotations += "|unsetAnnotation=" + annotation + } + // If --inherit-annotation was manually set to false then update history. + if s.executor.inheritAnnotations == types.OptionalBoolFalse { + inheritAnnotations = "|inheritAnnotations=false" + } + // If new annotations are added, they must be added as part of the last step of the build, + // so mention in history that new annotations were added inorder to make sure the builds + // can either reuse layers or burst the cache depending upon new annotations. + if len(s.executor.annotations) > 0 { + newAnnotations += strings.Join(s.executor.annotations, ",") + } + } + + if addcopy { + return inheritLabels + " " + unsetAnnotations + " " + inheritAnnotations + " " + newAnnotations + } + return inheritLabels + unsetAnnotations + inheritAnnotations + newAnnotations +} diff --git a/vendor/github.com/containers/buildah/internal/mkcw/workload.go b/vendor/github.com/containers/buildah/internal/mkcw/workload.go index ddfdc88d52..e407b22f93 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/workload.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/workload.go @@ -92,7 +92,7 @@ func ReadWorkloadConfigFromImage(path string) (WorkloadConfig, error) { } err = json.Unmarshal(configBytes, &wc) if err != nil { - err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err) + err = fmt.Errorf("unmarshalling configuration %q: %w", string(configBytes), err) } return wc, err } diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go index 453614e717..47061c4a37 100644 --- a/vendor/github.com/containers/buildah/internal/types.go +++ b/vendor/github.com/containers/buildah/internal/types.go @@ -6,8 +6,10 @@ const ( // external items which are downloaded for a build, typically a tarball // being used as an additional build context. BuildahExternalArtifactsDir = "buildah-external-artifacts" - // SourceDateEpochName is the name of the SOURCE_DATE_EPOCH environment - // variable when it's read from the environment by our main(). 
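buildMetadata, added just above, folds the label and annotation options into the createdBy string, and only the last step of a build carries the annotation-related parts, which is what lets earlier layers keep their cache while annotation changes still invalidate the final one. The following sketch mirrors that concatenation in simplified form (it omits the extra spacing used for ADD/COPY) purely to show what the resulting history suffixes look like; the flag values are illustrative.

package main

import (
	"fmt"
	"strings"
)

// historySuffix mirrors the concatenation performed by buildMetadata for a
// non-final versus a final step (simplified; no ADD/COPY spacing variant).
func historySuffix(isLastStep bool, inheritLabels, inheritAnnotations bool, unsetAnnotations, annotations []string) string {
	suffix := ""
	if !inheritLabels {
		suffix += "|inheritLabels=false"
	}
	if isLastStep {
		for _, a := range unsetAnnotations {
			suffix += "|unsetAnnotation=" + a
		}
		if !inheritAnnotations {
			suffix += "|inheritAnnotations=false"
		}
		if len(annotations) > 0 {
			suffix += strings.Join(annotations, ",")
		}
	}
	return suffix
}

func main() {
	fmt.Printf("mid-build step: %q\n", historySuffix(false, false, false, []string{"a"}, []string{"x=y"}))
	fmt.Printf("final step:     %q\n", historySuffix(true, false, false, []string{"a"}, []string{"x=y"}))
}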
+ // SourceDateEpochName is both the name of the SOURCE_DATE_EPOCH + // environment variable and the built-in ARG that carries its value, + // whether it's read from the environment by our main(), or passed in + // via CLI or API flags. SourceDateEpochName = "SOURCE_DATE_EPOCH" ) diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index 42d3ca5536..5f74f96a38 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -54,8 +54,7 @@ func NormalizePlatform(platform v1.Platform) v1.Platform { func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { var err error if !filepath.IsAbs(opts.Path) { - opts.Path, err = filepath.Abs(opts.Path) - if err != nil { + if opts.Path, err = filepath.Abs(opts.Path); err != nil { return err } } @@ -72,26 +71,22 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { noLChown = true } - err = os.MkdirAll(opts.Path, 0o700) - if err != nil { + if err = os.MkdirAll(opts.Path, 0o700); err != nil { return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err) } - err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown}) - if err != nil { + if err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown}); err != nil { return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err) } } else { outFile := os.Stdout if !opts.IsStdout { - outFile, err = os.Create(opts.Path) - if err != nil { + if outFile, err = os.Create(opts.Path); err != nil { return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err) } defer outFile.Close() } - _, err = io.Copy(outFile, input) - if err != nil { + if _, err = io.Copy(outFile, input); err != nil { return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err) } } diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 3bb4ff77f5..431b06b51f 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -320,7 +320,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions TopLayer: topLayer, Args: maps.Clone(options.Args), Format: options.Format, - TempVolumes: map[string]bool{}, Devices: options.Devices, DeviceSpecs: options.DeviceSpecs, Logger: options.Logger, diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go index 12d17afbfd..6de027c1bb 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/build.go +++ b/vendor/github.com/containers/buildah/pkg/cli/build.go @@ -13,6 +13,7 @@ import ( "os" "path/filepath" "slices" + "strconv" "strings" "time" @@ -257,11 +258,19 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( return options, nil, nil, err } } - var timestamp *time.Time + var timestamp, sourceDateEpoch *time.Time if c.Flag("timestamp").Changed { t := time.Unix(iopts.Timestamp, 0).UTC() timestamp = &t } + if iopts.SourceDateEpoch != "" { + u, err := strconv.ParseInt(iopts.SourceDateEpoch, 10, 64) + if err != nil { + return options, nil, nil, fmt.Errorf("error parsing source-date-epoch offset %q: %w", iopts.SourceDateEpoch, err) + } + s := time.Unix(u, 0).UTC() + sourceDateEpoch = &s + } if c.Flag("output").Changed { for _, buildOutput := range iopts.BuildOutputs { // if any 
of these go to stdout, we need to avoid @@ -369,6 +378,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( Compression: compression, ConfigureNetwork: networkPolicy, ContextDirectory: contextDir, + CreatedAnnotation: types.NewOptionalBool(iopts.CreatedAnnotation), Devices: iopts.Devices, DropCapabilities: iopts.CapDrop, Err: stderr, @@ -381,6 +391,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( IgnoreFile: iopts.IgnoreFile, In: stdin, InheritLabels: types.NewOptionalBool(iopts.InheritLabels), + InheritAnnotations: types.NewOptionalBool(iopts.InheritAnnotations), Isolation: isolation, Jobs: &iopts.Jobs, Labels: iopts.Label, @@ -405,6 +416,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( Quiet: iopts.Quiet, RemoveIntermediateCtrs: iopts.Rm, ReportWriter: reporter, + RewriteTimestamp: iopts.RewriteTimestamp, Runtime: iopts.Runtime, RuntimeArgs: runtimeFlags, RusageLogFile: iopts.RusageLogFile, @@ -412,6 +424,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( SignBy: iopts.SignBy, SignaturePolicyPath: iopts.SignaturePolicy, SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages), + SourceDateEpoch: sourceDateEpoch, Squash: iopts.Squash, SystemContext: systemContext, Target: iopts.Target, @@ -419,6 +432,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( TransientMounts: iopts.Volumes, UnsetEnvs: iopts.UnsetEnvs, UnsetLabels: iopts.UnsetLabels, + UnsetAnnotations: iopts.UnsetAnnotations, } if iopts.RetryDelay != "" { options.PullPushRetryDelay, err = time.ParseDuration(iopts.RetryDelay) diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index c2276d296a..e00e47e313 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" "github.com/containers/buildah/pkg/completion" "github.com/containers/buildah/pkg/parse" commonComp "github.com/containers/common/pkg/completion" @@ -73,6 +74,7 @@ type BudResults struct { From string Iidfile string InheritLabels bool + InheritAnnotations bool Label []string LayerLabel []string Logfile string @@ -116,12 +118,16 @@ type BudResults struct { RusageLogFile string UnsetEnvs []string UnsetLabels []string + UnsetAnnotations []string Envs []string OSFeatures []string OSVersion string CWOptions string SBOMOptions []string CompatVolumes bool + SourceDateEpoch string + RewriteTimestamp bool + CreatedAnnotation bool } // FromAndBugResults represents the results for common flags @@ -233,7 +239,9 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image") fs.BoolVar(&flags.CompatVolumes, "compat-volumes", false, "preserve the contents of VOLUMEs during RUN instructions") fs.BoolVar(&flags.InheritLabels, "inherit-labels", true, "inherit the labels from the base image or base stages.") + fs.BoolVar(&flags.InheritAnnotations, "inherit-annotations", true, "inherit the annotations from the base image or base stages.") fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)") + fs.BoolVar(&flags.CreatedAnnotation, "created-annotation", true, `set an 
"org.opencontainers.image.created" annotation in the image`) fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry") fs.StringVarP(&flags.CWOptions, "cw", "", "", "confidential workload `options`") fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default") @@ -303,17 +311,24 @@ newer: only pull base and SBOM scanner images when newer images exist on the r panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err)) } fs.BoolVar(&flags.SkipUnusedStages, "skip-unused-stages", true, "skips stages in multi-stage builds which do not affect the final target") + sourceDateEpochUsageDefault := ", defaults to current time" + if v := os.Getenv(internal.SourceDateEpochName); v != "" { + sourceDateEpochUsageDefault = "" + } + fs.StringVar(&flags.SourceDateEpoch, "source-date-epoch", os.Getenv(internal.SourceDateEpochName), "set new timestamps in image info to `seconds` after the epoch"+sourceDateEpochUsageDefault) + fs.BoolVar(&flags.RewriteTimestamp, "rewrite-timestamp", false, "set timestamps in layers to no later than the value for --source-date-epoch") fs.BoolVar(&flags.Squash, "squash", false, "squash all image layers into a single layer") fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|[=|[,]])") fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers") fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") fs.StringArrayVarP(&flags.BuildOutputs, "output", "o", nil, "output destination (format: type=local,dest=path)") fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") - fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time") + fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set new timestamps in image info and layer to `seconds` after the epoch, defaults to current times") fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") fs.String("variant", "", "override the `variant` of the specified image") fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image") fs.StringSliceVar(&flags.UnsetLabels, "unsetlabel", nil, "unset label when inheriting labels from base image") + fs.StringSliceVar(&flags.UnsetAnnotations, "unsetannotation", nil, "unset annotation when inheriting annotations from base image") return fs } @@ -363,11 +378,13 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion["sign-by"] = commonComp.AutocompleteNone flagCompletion["signature-policy"] = commonComp.AutocompleteNone flagCompletion["ssh"] = commonComp.AutocompleteNone + flagCompletion["source-date-epoch"] = commonComp.AutocompleteNone flagCompletion["tag"] = commonComp.AutocompleteNone flagCompletion["target"] = commonComp.AutocompleteNone flagCompletion["timestamp"] = commonComp.AutocompleteNone flagCompletion["unsetenv"] = commonComp.AutocompleteNone flagCompletion["unsetlabel"] = commonComp.AutocompleteNone + flagCompletion["unsetannotation"] = commonComp.AutocompleteNone flagCompletion["variant"] = commonComp.AutocompleteNone return flagCompletion } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index fbedb4d83f..e434966cfd 100644 --- 
a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -184,7 +184,11 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name cpuQuota, _ := flags.GetInt64("cpu-quota") cpuShares, _ := flags.GetUint64("cpu-shares") httpProxy, _ := flags.GetBool("http-proxy") - identityLabel, _ := flags.GetBool("identity-label") + var identityLabel types.OptionalBool + if flags.Changed("identity-label") { + b, _ := flags.GetBool("identity-label") + identityLabel = types.NewOptionalBool(b) + } omitHistory, _ := flags.GetBool("omit-history") ulimit := []string{} @@ -208,7 +212,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name DNSSearch: dnsSearch, DNSServers: dnsServers, HTTPProxy: httpProxy, - IdentityLabel: types.NewOptionalBool(identityLabel), + IdentityLabel: identityLabel, Memory: memoryLimit, MemorySwap: memorySwap, NoHostname: noHostname, diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 3988bf511e..53f0a71089 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -5,11 +5,13 @@ import ( "io" "net" + "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/libnetwork/etchosts" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -20,6 +22,15 @@ const ( runUsingRuntimeCommand = define.Package + "-oci-runtime" ) +// compatLayerExclusions is the set of items to omit from layers if +// options.CompatLayerOmissions is set to true. For whatever reason, the +// classic builder didn't bake these into images, but BuildKit does. +var compatLayerExclusions = []copier.ConditionalRemovePath{ + {Path: "dev", Owner: &idtools.IDPair{UID: 0, GID: 0}}, + {Path: "proc", Owner: &idtools.IDPair{UID: 0, GID: 0}}, + {Path: "sys", Owner: &idtools.IDPair{UID: 0, GID: 0}}, +} + // TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal. type TerminalPolicy int @@ -180,12 +191,8 @@ type RunOptions struct { // RunMountArtifacts are the artifacts created when using a run mount. 
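The identity-label change in parse.go above is about preserving tri-state semantics: the OptionalBool only receives a definite value when the user actually passed --identity-label, so "unset" can later mean "apply the default unless a reproducible-build timestamp is in effect". A small sketch of that pattern; it assumes the spf13/pflag API (Changed, GetBool) that the surrounding code already uses, and the OptionalBool type here is a local stand-in.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// OptionalBool distinguishes "unset" from explicit true/false.
type OptionalBool int

const (
	OptionalBoolUndefined OptionalBool = iota
	OptionalBoolTrue
	OptionalBoolFalse
)

// optionalBoolFlag resolves a tri-state value from a boolean flag: it stays
// undefined unless the flag was explicitly set on the command line.
func optionalBoolFlag(flags *pflag.FlagSet, name string) OptionalBool {
	if !flags.Changed(name) {
		return OptionalBoolUndefined
	}
	if v, _ := flags.GetBool(name); v {
		return OptionalBoolTrue
	}
	return OptionalBoolFalse
}

func main() {
	flags := pflag.NewFlagSet("build", pflag.ContinueOnError)
	flags.Bool("identity-label", true, "add the buildah identity label")
	_ = flags.Parse([]string{}) // no --identity-label on the command line
	fmt.Println(optionalBoolFlag(flags, "identity-label") == OptionalBoolUndefined) // true
}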
 type runMountArtifacts struct {
-	// RunMountTargets are the run mount targets inside the container which should be removed
-	RunMountTargets []string
 	// RunOverlayDirs are overlay directories which will need to be cleaned up using overlay.RemoveTemp()
 	RunOverlayDirs []string
-	// TmpFiles are artifacts that need to be removed outside the container
-	TmpFiles []string
 	// Any images which were mounted, which should be unmounted
 	MountedImages []string
 	// Agents are the ssh agents started, which should have their Shutdown() methods called
diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go
index 3448c47250..9d1b81fe18 100644
--- a/vendor/github.com/containers/buildah/run_common.go
+++ b/vendor/github.com/containers/buildah/run_common.go
@@ -3,6 +3,7 @@ package buildah
 import (
+	"archive/tar"
 	"bytes"
 	"encoding/json"
 	"errors"
@@ -13,6 +14,7 @@ import (
 	"os"
 	"os/exec"
 	"os/signal"
+	"path"
 	"path/filepath"
 	"runtime"
 	"slices"
@@ -1370,14 +1372,14 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 		processGID: int(processGID),
 	}
 	// Get the list of mounts that are just for this Run() call.
-	runMounts, mountArtifacts, err := b.runSetupRunMounts(mountPoint, bundlePath, runFileMounts, runMountInfo, idMaps)
+	runMounts, mountArtifacts, err := b.runSetupRunMounts(bundlePath, runFileMounts, runMountInfo, idMaps)
 	if err != nil {
 		return nil, err
 	}
 	succeeded := false
 	defer func() {
 		if !succeeded {
-			if err := b.cleanupRunMounts(mountPoint, mountArtifacts); err != nil {
+			if err := b.cleanupRunMounts(mountArtifacts); err != nil {
 				b.Logger.Debugf("cleaning up run mounts: %v", err)
 			}
 		}
@@ -1394,17 +1396,14 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 	if spec.Linux != nil {
 		mountLabel = spec.Linux.MountLabel
 	}
-	volumes, err := b.runSetupVolumeMounts(mountLabel, volumeMounts, optionMounts, idMaps)
+	volumes, overlayDirs, err := b.runSetupVolumeMounts(mountLabel, volumeMounts, optionMounts, idMaps)
 	if err != nil {
 		return nil, err
 	}
-
-	// prepare list of mount destinations which can be cleaned up safely.
-	// we can clean bindFiles, subscriptionMounts and specMounts
-	// everything other than these might have users content
-	mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...)
+	mountArtifacts.RunOverlayDirs = append(mountArtifacts.RunOverlayDirs, overlayDirs...)
 	allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...))
+	// Add them all, in the preferred order, except where they conflict with something that was previously added.
for _, mount := range allMounts { if haveMount(mount.Destination) { @@ -1493,52 +1492,12 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin return mounts, nil } -// Destinations which can be cleaned up after every RUN -func cleanableDestinationListFromMounts(mounts []specs.Mount) []string { - mountDest := []string{} - for _, mount := range mounts { - // Add all destination to mountArtifacts so that they can be cleaned up later - if mount.Destination != "" { - cleanPath := true - for _, prefix := range nonCleanablePrefixes { - if strings.HasPrefix(mount.Destination, prefix) { - cleanPath = false - break - } - } - if cleanPath { - mountDest = append(mountDest, mount.Destination) - } - } - } - return mountDest -} - -func checkIfMountDestinationPreExists(root string, dest string) (bool, error) { - statResults, err := copier.Stat(root, "", copier.StatOptions{}, []string{dest}) - if err != nil { - return false, err - } - if len(statResults) > 0 { - // We created exact path for globbing so it will - // return only one result. - if statResults[0].Error != "" && len(statResults[0].Globbed) == 0 { - // Path do not exist. - return false, nil - } - // Path exists. - return true, nil - } - return false, nil -} - // runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs // // If this function succeeds, the caller must free the returned // runMountArtifacts by calling b.cleanupRunMounts() after the command being // executed with those mounts has finished. -func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]specs.Mount, *runMountArtifacts, error) { - mountTargets := make([]string, 0, len(mounts)) +func (b *Builder) runSetupRunMounts(bundlePath string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]specs.Mount, *runMountArtifacts, error) { tmpFiles := make([]string, 0, len(mounts)) mountImages := make([]string, 0, len(mounts)) intermediateMounts := make([]string, 0, len(mounts)) @@ -1679,25 +1638,10 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri default: return nil, nil, fmt.Errorf("invalid mount type %q", mountType) } - - if mountSpec != nil { - pathPreExists, err := checkIfMountDestinationPreExists(mountPoint, mountSpec.Destination) - if err != nil { - return nil, nil, err - } - if !pathPreExists { - // In such case it means that the path did not exists before - // creating any new mounts therefore we must clean the newly - // created directory after this step. 
- mountTargets = append(mountTargets, mountSpec.Destination) - } - } } succeeded = true artifacts := &runMountArtifacts{ - RunMountTargets: mountTargets, RunOverlayDirs: overlayDirs, - TmpFiles: tmpFiles, Agents: agents, MountedImages: mountImages, SSHAuthSock: defaultSSHSock, @@ -1740,10 +1684,13 @@ func (b *Builder) getBindMount(tokens []string, sys *types.SystemContext, contex } }() optionMounts = append(optionMounts, optionMount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + volumes, overlayDirs, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) if err != nil { return nil, "", "", "", err } + if len(overlayDirs) != 0 { + return nil, "", "", "", errors.New("internal error: did not expect a resolved bind mount to use the O flag") + } succeeded = true return &volumes[0], image, intermediateMount, overlayMount, nil } @@ -1755,10 +1702,13 @@ func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps, workDir string) return nil, err } optionMounts = append(optionMounts, mount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + volumes, overlayDirs, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) if err != nil { return nil, err } + if len(overlayDirs) != 0 { + return nil, errors.New("internal error: did not expect a resolved tmpfs mount to use the O flag") + } return &volumes[0], nil } @@ -2010,19 +1960,8 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string] return &newMount, fwdAgent, nil } -func (b *Builder) cleanupTempVolumes() { - for tempVolume, val := range b.TempVolumes { - if val { - if err := overlay.RemoveTemp(tempVolume); err != nil { - b.Logger.Error(err.Error()) - } - b.TempVolumes[tempVolume] = false - } - } -} - // cleanupRunMounts cleans up run mounts so they only appear in this run. -func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { +func (b *Builder) cleanupRunMounts(artifacts *runMountArtifacts) error { for _, agent := range artifacts.Agents { servePath := agent.ServePath() if err := agent.Shutdown(); err != nil { @@ -2050,27 +1989,9 @@ func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifac logrus.Debugf("umounting image %q: %v", image, err) } } - // remove mount targets that were created for this run - opts := copier.RemoveOptions{ - All: true, - } - for _, path := range artifacts.RunMountTargets { - if err := copier.Remove(mountpoint, path, opts); err != nil { - return fmt.Errorf("removing mount target %q %q: %w", mountpoint, path, err) - } - } - var prevErr error - for _, path := range artifacts.TmpFiles { - if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { - if prevErr != nil { - logrus.Error(prevErr) - } - prevErr = fmt.Errorf("removing temporary file: %w", err) - } - } // unlock locks we took, most likely for cache mounts volumes.UnlockLockArray(artifacts.TargetLocks) - return prevErr + return nil } // setPdeathsig sets a parent-death signal for the process @@ -2110,3 +2031,143 @@ func mapContainerNameToHostname(containerName string) string { } return trimmed[:match[1]] } + +// createMountTargets creates empty files or directories that are used as +// targets for mounts in the spec, and makes a note of what it created. +func (b *Builder) createMountTargets(spec *specs.Spec) ([]copier.ConditionalRemovePath, error) { + // Avoid anything weird happening, just in case. 
+ if spec == nil || spec.Root == nil { + return nil, nil + } + rootfsPath := spec.Root.Path + then := time.Unix(0, 0) + exemptFromTimesPreservation := map[string]struct{}{ + "dev": {}, + "proc": {}, + "sys": {}, + } + exemptFromRemoval := map[string]struct{}{ + "dev": {}, + "proc": {}, + "sys": {}, + } + overridePermissions := map[string]os.FileMode{ + "dev": 0o755, + "proc": 0o755, + "sys": 0o755, + } + uidmap, gidmap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + targets := copier.EnsureOptions{ + UIDMap: uidmap, + GIDMap: gidmap, + } + for _, mnt := range spec.Mounts { + typeFlag := byte(tar.TypeDir) + // If the mount is a "bind" or "rbind" mount, then it's a bind + // mount, which means the target _could_ be a non-directory. + // Check the source and make a note. + if mnt.Type == define.TypeBind || slices.Contains(mnt.Options, "bind") || slices.Contains(mnt.Options, "rbind") { + if st, err := os.Stat(mnt.Source); err == nil { + if !st.IsDir() { + typeFlag = tar.TypeReg + } + } + } + // Walk the path components from the root all the way down to + // the target mountpoint and build a list of pathnames that we + // need to ensure exist. If we might need to remove them, give + // them a conspicuous mtime, so that we can detect if they were + // unmounted and then modified, in which case we'll want to + // preserve those changes. + destination := mnt.Destination + for destination != "" { + cleanedDestination := strings.Trim(path.Clean(filepath.ToSlash(destination)), "/") + modTime := &then + if _, ok := exemptFromTimesPreservation[cleanedDestination]; ok { + // don't force a timestamp for this path + modTime = nil + } + var mode *os.FileMode + if _, ok := exemptFromRemoval[cleanedDestination]; ok { + // we're not going to filter this out later, + // so don't make it look weird + perms := os.FileMode(0o755) + if typeFlag == tar.TypeReg { + perms = 0o644 + } + mode = &perms + modTime = nil + } + if perms, ok := overridePermissions[cleanedDestination]; ok { + // forced permissions + mode = &perms + } + targets.Paths = append(targets.Paths, copier.EnsurePath{ + Path: destination, + Typeflag: typeFlag, + ModTime: modTime, + Chmod: mode, + }) + typeFlag = tar.TypeDir + dir, _ := filepath.Split(destination) + if destination == dir { + break + } + destination = dir + } + } + if len(targets.Paths) == 0 { + return nil, nil + } + created, err := copier.Ensure(rootfsPath, rootfsPath, targets) + if err != nil { + return nil, err + } + logrus.Debugf("created mount targets at %v", created) + var remove []copier.ConditionalRemovePath + for _, target := range created { + cleanedTarget := strings.Trim(path.Clean(filepath.ToSlash(target)), "/") + if _, ok := exemptFromRemoval[cleanedTarget]; ok { + continue + } + modTime := &then + if _, ok := exemptFromTimesPreservation[cleanedTarget]; ok { + modTime = nil + } + condition := copier.ConditionalRemovePath{ + Path: cleanedTarget, + ModTime: modTime, + Owner: &idtools.IDPair{UID: 0, GID: 0}, + } + remove = append(remove, condition) + } + if len(remove) == 0 { + return nil, nil + } + // encode the set of paths we might need to filter out at commit-time + // in a way that hopefully doesn't break long-running concurrent Run() + // calls, that lets us also not have to manage any locking for them + cdir, err := b.store.ContainerDirectory(b.Container) + if err != nil { + return nil, fmt.Errorf("finding working container bookkeeping directory: %w", err) + } + if err := os.Mkdir(filepath.Join(cdir, containerExcludesDir), 0o700); err != 
nil && !errors.Is(err, os.ErrExist) { + return nil, fmt.Errorf("creating exclusions directory: %w", err) + } + encoded, err := json.Marshal(remove) + if err != nil { + return nil, fmt.Errorf("encoding list of items to exclude at commit-time: %w", err) + } + f, err := os.CreateTemp(filepath.Join(cdir, containerExcludesDir), "filter*"+containerExcludesSubstring) + if err != nil { + return nil, fmt.Errorf("creating exclusions file: %w", err) + } + defer os.Remove(f.Name()) + defer f.Close() + if err := ioutils.AtomicWriteFile(strings.TrimSuffix(f.Name(), containerExcludesSubstring), encoded, 0o600); err != nil { + return nil, fmt.Errorf("writing exclusions file: %w", err) + } + // return that set of paths directly, in case the caller would prefer + // to clear them out before commit-time + return remove, nil +} diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index e7f2df4b9e..637b96a098 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -45,15 +45,6 @@ const ( PROC_REAP_RELEASE = 3 ) -// We dont want to remove destinations with /etc, /dev as -// rootfs already contains these files and unionfs will create -// a `whiteout` i.e `.wh` files on removal of overlapping -// files from these directories. everything other than these -// will be cleaned up -var nonCleanablePrefixes = []string{ - "/etc", "/dev", -} - func procctl(idtype int, id int, cmd int, arg *byte) error { _, _, e1 := unix.Syscall6( unix.SYS_PROCCTL, uintptr(idtype), uintptr(id), @@ -298,13 +289,11 @@ func (b *Builder) Run(command []string, options RunOptions) error { } defer func() { - if err := b.cleanupRunMounts(mountPoint, runArtifacts); err != nil { + if err := b.cleanupRunMounts(runArtifacts); err != nil { options.Logger.Errorf("unable to cleanup run mounts %v", err) } }() - defer b.cleanupTempVolumes() - // If we are creating a network, make the vnet here so that we can // execute the OCI runtime inside it. For FreeBSD-13.3 and later, we can // configure the container network settings from outside the jail, which @@ -336,6 +325,28 @@ func (b *Builder) Run(command []string, options RunOptions) error { }() } + // Create any mount points that we need that aren't already present in + // the rootfs. + createdMountTargets, err := b.createMountTargets(spec) + if err != nil { + return fmt.Errorf("ensuring mount targets for container %q: %w", b.ContainerID, err) + } + defer func() { + // Attempt to clean up mount targets for the sake of builds + // that don't commit and rebase at each step, and people using + // `buildah run` more than once, who don't expect empty mount + // points to stick around. They'll still get filtered out at + // commit-time if another concurrent Run() is keeping something + // busy. 
+ if _, err := copier.ConditionalRemove(mountPoint, mountPoint, copier.ConditionalRemoveOptions{ + UIDMap: b.store.UIDMap(), + GIDMap: b.store.GIDMap(), + Paths: createdMountTargets, + }); err != nil { + options.Logger.Errorf("unable to cleanup run mount targets %v", err) + } + }() + switch isolation { case IsolationOCI: var moreCreateArgs []string @@ -382,11 +393,11 @@ func (b *Builder) getCacheMount(tokens []string, sys *types.SystemContext, stage return nil, "", "", "", nil, errors.New("cache mounts not supported on freebsd") } -func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) { +func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, overlayDirs []string, Err error) { // Make sure the overlay directory is clean before running _, err := b.store.ContainerDirectory(b.ContainerID) if err != nil { - return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err) + return nil, nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err) } parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { @@ -434,7 +445,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) if err == nil { - b.TempVolumes[contentDir] = true + overlayDirs = append(overlayDirs, contentDir) } return overlayMount, err } @@ -451,7 +462,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, logrus.Debugf("setting up mounted volume at %q", i.Destination) mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) if err != nil { - return nil, err + return nil, nil, err } mounts = append(mounts, mount) } @@ -464,11 +475,11 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } mount, err := parseMount("nullfs", spliti[0], spliti[1], options) if err != nil { - return nil, err + return nil, nil, err } mounts = append(mounts, mount) } - return mounts, nil + return mounts, overlayDirs, nil } func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error { diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 584507ae4e..b5e874595b 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -54,19 +54,9 @@ import ( "tags.cncf.io/container-device-interface/pkg/parser" ) -var ( - // We dont want to remove destinations with /etc, /dev, /sys, - // /proc as rootfs already contains these files and unionfs - // will create a `whiteout` i.e `.wh` files on removal of - // overlapping files from these directories. everything other - // than these will be cleaned up - nonCleanablePrefixes = []string{ - "/etc", "/dev", "/sys", "/proc", - } - // binfmtRegistered makes sure we only try to register binfmt_misc - // interpreters once, the first time we handle a RUN instruction. - binfmtRegistered sync.Once -) +// binfmtRegistered makes sure we only try to register binfmt_misc +// interpreters once, the first time we handle a RUN instruction. 
+var binfmtRegistered sync.Once func setChildProcess() error { if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { @@ -528,6 +518,28 @@ rootless=%d spec.Process.Env = append(spec.Process.Env, sshenv) } + // Create any mount points that we need that aren't already present in + // the rootfs. + createdMountTargets, err := b.createMountTargets(spec) + if err != nil { + return fmt.Errorf("ensuring mount targets for container %q: %w", b.ContainerID, err) + } + defer func() { + // Attempt to clean up mount targets for the sake of builds + // that don't commit and rebase at each step, and people using + // `buildah run` more than once, who don't expect empty mount + // points to stick around. They'll still get filtered out at + // commit-time if another concurrent Run() is keeping something + // busy. + if _, err := copier.ConditionalRemove(mountPoint, mountPoint, copier.ConditionalRemoveOptions{ + UIDMap: b.store.UIDMap(), + GIDMap: b.store.GIDMap(), + Paths: createdMountTargets, + }); err != nil { + options.Logger.Errorf("unable to cleanup run mount targets %v", err) + } + }() + // following run was called from `buildah run` // and some images were mounted for this run // add them to cleanup artifacts @@ -536,13 +548,11 @@ rootless=%d } defer func() { - if err := b.cleanupRunMounts(mountPoint, runArtifacts); err != nil { + if err := b.cleanupRunMounts(runArtifacts); err != nil { options.Logger.Errorf("unable to cleanup run mounts %v", err) } }() - defer b.cleanupTempVolumes() - // Handle mount flags that request that the source locations for "bind" mountpoints be // relabeled, and filter those flags out of the list of mount options we pass to the // runtime. @@ -1115,14 +1125,14 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) return nil } -func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) { +func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, overlayDirs []string, Err error) { // Make sure the overlay directory is clean before running containerDir, err := b.store.ContainerDirectory(b.ContainerID) if err != nil { - return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err) + return nil, nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err) } if err := overlay.CleanupContent(containerDir); err != nil { - return nil, fmt.Errorf("cleaning up overlay content for %s: %w", b.ContainerID, err) + return nil, nil, fmt.Errorf("cleaning up overlay content for %s: %w", b.ContainerID, err) } parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { @@ -1205,7 +1215,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) if err == nil { - b.TempVolumes[contentDir] = true + overlayDirs = append(overlayDirs, contentDir) } // If chown true, add correct ownership to the overlay temp directories. 
@@ -1237,7 +1247,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, logrus.Debugf("setting up mounted volume at %q", i.Destination) mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) if err != nil { - return nil, err + return nil, nil, err } mounts = append(mounts, mount) } @@ -1251,11 +1261,11 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, options = append(options, "rbind") mount, err := parseMount("bind", spliti[0], spliti[1], options) if err != nil { - return nil, err + return nil, nil, err } mounts = append(mounts, mount) } - return mounts, nil + return mounts, overlayDirs, nil } func setupMaskedPaths(g *generate.Generator, opts *define.CommonBuildOptions) { @@ -1517,10 +1527,13 @@ func (b *Builder) getCacheMount(tokens []string, sys *types.SystemContext, stage } }() optionMounts = append(optionMounts, optionMount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + volumes, overlayDirs, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) if err != nil { return nil, "", "", "", nil, err } + if len(overlayDirs) != 0 { + return nil, "", "", "", nil, errors.New("internal error: did not expect a resolved cache mount to use the O flag") + } succeeded = true return &volumes[0], mountedImage, intermediateMount, overlayMount, targetLock, nil } diff --git a/vendor/github.com/containers/buildah/scan.go b/vendor/github.com/containers/buildah/scan.go index 0ae88768fa..c01baace17 100644 --- a/vendor/github.com/containers/buildah/scan.go +++ b/vendor/github.com/containers/buildah/scan.go @@ -52,6 +52,13 @@ func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFil } } }() + scansSubdir := filepath.Join(scansDir, "scans") + if err = os.Mkdir(scansSubdir, 0o700); err != nil { + return nil, nil, "", err + } + if err = os.Chmod(scansSubdir, 0o777); err != nil { + return nil, nil, "", err + } // We may be producing sets of outputs using temporary containers, and // there's no need to create more than one container for any one @@ -127,7 +134,7 @@ func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFil // Our temporary directory, read-write. 
{ Type: define.TypeBind, - Source: scansDir, + Source: scansSubdir, Destination: scansTargetDir, Options: []string{"rw", "z"}, }, @@ -212,19 +219,19 @@ func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFil var sbomResult, purlResult string switch { case scanSpec.ImageSBOMOutput != "": - sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImageSBOMOutput)) + sbomResult = filepath.Join(scansSubdir, filepath.Base(scanSpec.ImageSBOMOutput)) case scanSpec.SBOMOutput != "": - sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.SBOMOutput)) + sbomResult = filepath.Join(scansSubdir, filepath.Base(scanSpec.SBOMOutput)) default: - sbomResult = filepath.Join(scansDir, "sbom-result") + sbomResult = filepath.Join(scansSubdir, "sbom-result") } switch { case scanSpec.ImagePURLOutput != "": - purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImagePURLOutput)) + purlResult = filepath.Join(scansSubdir, filepath.Base(scanSpec.ImagePURLOutput)) case scanSpec.PURLOutput != "": - purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.PURLOutput)) + purlResult = filepath.Join(scansSubdir, filepath.Base(scanSpec.PURLOutput)) default: - purlResult = filepath.Join(scansDir, "purl-result") + purlResult = filepath.Join(scansSubdir, "purl-result") } copyFile := func(destination, source string) error { dst, err := os.Create(destination) @@ -244,7 +251,7 @@ func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFil } err = func() error { for i := range resultFiles { - thisResultFile := filepath.Join(scansDir, filepath.Base(resultFiles[i])) + thisResultFile := filepath.Join(scansSubdir, filepath.Base(resultFiles[i])) switch i { case 0: // Straight-up copy to create the first version of the final output. diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml index 9921997daf..cdbf80dec9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -1,6 +1,3 @@ -run: - deadline: 5m - linters: disable-all: true enable: diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index 452dbd15f0..ef36670a89 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/util/stack/stack.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,15 +22,14 @@ const ( ) type Stack struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` + Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` unknownFields protoimpl.UnknownFields - - Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` - Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` - Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Stack) Reset() { @@ -98,13 +98,12 @@ func (x *Stack) GetRevision() string { } type Frame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` - Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Frame) Reset() { @@ -160,37 +159,28 @@ func (x *Frame) GetLine() int32 { var File_github_com_moby_buildkit_util_stack_stack_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, - 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x63, 0x6b, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x52, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, - 0x6e, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x70, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x09, - 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x42, - 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, - 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, - 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = "" + + "\n" + + "/github.com/moby/buildkit/util/stack/stack.proto\x12\x05stack\"\x8f\x01\n" + + "\x05Stack\x12$\n" + + "\x06frames\x18\x01 \x03(\v2\f.stack.FrameR\x06frames\x12\x18\n" + + "\acmdline\x18\x02 \x03(\tR\acmdline\x12\x10\n" + + "\x03pid\x18\x03 \x01(\x05R\x03pid\x12\x18\n" + + "\aversion\x18\x04 \x01(\tR\aversion\x12\x1a\n" + + "\brevision\x18\x05 \x01(\tR\brevision\"C\n" + + "\x05Frame\x12\x12\n" + + "\x04Name\x18\x01 \x01(\tR\x04Name\x12\x12\n" + + "\x04File\x18\x02 \x01(\tR\x04File\x12\x12\n" + + "\x04Line\x18\x03 \x01(\x05R\x04LineB%Z#github.com/moby/buildkit/util/stackb\x06proto3" var ( file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData []byte ) func file_github_com_moby_buildkit_util_stack_stack_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData) + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc), len(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc))) }) return file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData } @@ -218,7 +208,7 @@ func file_github_com_moby_buildkit_util_stack_stack_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc), len(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -229,7 +219,6 @@ func file_github_com_moby_buildkit_util_stack_stack_proto_init() { MessageInfos: file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes, }.Build() File_github_com_moby_buildkit_util_stack_stack_proto = out.File - file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = nil file_github_com_moby_buildkit_util_stack_stack_proto_goTypes = nil file_github_com_moby_buildkit_util_stack_stack_proto_depIdxs = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index b1734a37cf..bd04822cd6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -108,7 +108,7 @@ github.com/containernetworking/cni/pkg/version # 
github.com/containernetworking/plugins v1.7.1 ## explicit; go 1.23.0 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30 +# github.com/containers/buildah v1.41.0 ## explicit; go 1.23.3 github.com/containers/buildah github.com/containers/buildah/bind @@ -294,8 +294,8 @@ github.com/containers/libhvee/pkg/wmiext # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 ## explicit github.com/containers/libtrust -# github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be -## explicit; go 1.21 +# github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194 +## explicit; go 1.23.0 github.com/containers/luksy # github.com/containers/ocicrypt v1.2.1 ## explicit; go 1.22 @@ -494,8 +494,8 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/fsouza/go-dockerclient v1.12.0 -## explicit; go 1.22 +# github.com/fsouza/go-dockerclient v1.12.1 +## explicit; go 1.23 github.com/fsouza/go-dockerclient # github.com/go-jose/go-jose/v4 v4.0.5 ## explicit; go 1.21 @@ -651,7 +651,7 @@ github.com/miekg/pkcs11 # github.com/mistifyio/go-zfs/v3 v3.0.1 ## explicit; go 1.14 github.com/mistifyio/go-zfs/v3 -# github.com/moby/buildkit v0.22.0 +# github.com/moby/buildkit v0.23.2 ## explicit; go 1.23.0 github.com/moby/buildkit/frontend/dockerfile/command github.com/moby/buildkit/frontend/dockerfile/parser