vendor: update buildah to latest main

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger
2025-08-21 18:33:04 +02:00
parent e76b08394e
commit b172cf7475
18 changed files with 461 additions and 140 deletions

View File

@ -211,24 +211,12 @@ integration_task:
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -240,30 +228,18 @@ integration_task:
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
gce_instance:
gce_instance: &integration_gce_instance
image_name: "$IMAGE_NAME"
cpu: 8
memory: "8G"
@ -286,6 +262,53 @@ integration_task:
package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages'
golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'
non_blocking_integration_task:
name: "Integration $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER (non-blocking)"
alias: non_blocking_integration
skip: *not_build_docs
depends_on: *smoke_vendor
matrix:
# VFS
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
# OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
gce_instance:
<<: *integration_gce_instance
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}'
binary_artifacts:
path: ./bin/*
always: *standardlogs
integration_rootless_task:
name: "Integration rootless $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration_rootless
@ -295,13 +318,6 @@ integration_rootless_task:
matrix:
# Running rootless tests on overlay
# OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
@ -309,13 +325,6 @@ integration_rootless_task:
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
@ -330,9 +339,43 @@ integration_rootless_task:
PRIV_NAME: rootless
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 8
memory: "8G"
<<: *integration_gce_instance
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}'
binary_artifacts:
path: ./bin/*
always:
<<: *standardlogs
non_blocking_integration_rootless_task:
name: "Integration rootless $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER (non-blocking)"
alias: non_blocking_integration_rootless
skip: *not_build_docs
depends_on: *smoke_vendor
matrix:
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
gce_instance:
<<: *integration_gce_instance
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'

View File

@ -13,6 +13,7 @@ describes the project's governance and the Project Roles used below.
| Paul Holzinger | [Luap99](https://github.com/Luap99) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Giuseppe Scrivano | [giuseppe](https://github.com/giuseppe) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Miloslav Trmač | [mtrmac](https://github.com/mtrmac) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Mohan Boddu | [mohanboddu](https://github.com/mohanboddu) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Neil Smith | [actionmancan](https://github.com/actionmancan) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |

View File

@ -59,7 +59,7 @@ export GOLANGCI_LINT_VERSION := 2.1.0
# Note: Uses the -N -l go compiler options to disable compiler optimizations
# and inlining. Using these build options allows you to subsequently
# use source debugging tools like delve.
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec bin/passwd docs
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec bin/passwd bin/crash bin/wait docs
bin/buildah: $(SOURCES) internal/mkcw/embed/entrypoint_amd64.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
@ -91,6 +91,12 @@ bin/buildah.%: $(SOURCES)
mkdir -p ./bin
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
bin/crash: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/crash
bin/wait: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/wait
bin/dumpspec: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/dumpspec

View File

@ -144,7 +144,12 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
return err
}
tlsClientConfig := &tls.Config{
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
// As of 2025-08, tlsconfig.ClientDefault() differs from Go 1.23 defaults only in CipherSuites;
// so, limit us to only using that value. If go-connections/tlsconfig changes its policy, we
// will want to consider that and make a decision whether to follow suit.
// There is some chance that eventually the Go default will be to require TLS 1.3, and at that point
// we might want to drop the dependency on go-connections entirely.
CipherSuites: tlsconfig.ClientDefault().CipherSuites,
}
if err := tlsclientconfig.SetupCertificates(certPath, tlsClientConfig); err != nil {
return err
@ -426,15 +431,17 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
// source item, or the destination has a path separator at the end of
// it, and it's not a remote URL, the destination needs to be a
// directory.
destMustBeDirectory := strings.HasSuffix(destination, string(os.PathSeparator)) || strings.HasSuffix(destination, string(os.PathSeparator)+".") // keep this in sync with github.com/openshift/imagebuilder.hasSlash()
destMustBeDirectory = destMustBeDirectory || destination == "" || (len(sources) > 1)
if destination == "" || !filepath.IsAbs(destination) {
tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
if destMustBeDirectory {
destination = tmpDestination + string(os.PathSeparator)
} else {
destination = tmpDestination
}
}
destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator)) || destination == b.WorkDir()
destMustBeDirectory = destMustBeDirectory || (filepath.Clean(destination) == filepath.Clean(b.WorkDir()))
destCanBeFile := false
if len(sources) == 1 {
if len(remoteSources) == 1 {

View File

@ -201,7 +201,7 @@ func (req *request) UIDMap() []idtools.IDMap {
case requestEval:
return nil
case requestStat:
return nil
return req.StatOptions.UIDMap
case requestGet:
return req.GetOptions.UIDMap
case requestPut:
@ -226,7 +226,7 @@ func (req *request) GIDMap() []idtools.IDMap {
case requestEval:
return nil
case requestStat:
return nil
return req.StatOptions.GIDMap
case requestGet:
return req.GetOptions.GIDMap
case requestPut:
@ -284,6 +284,7 @@ type StatForItem struct {
Size int64 // dereferenced value for symlinks
Mode os.FileMode // dereferenced value for symlinks
ModTime time.Time // dereferenced value for symlinks
UID, GID int64 // usually in the uint32 range, set to -1 if unknown
IsSymlink bool
IsDir bool // dereferenced value for symlinks
IsRegular bool // dereferenced value for symlinks
@ -305,7 +306,8 @@ type removeResponse struct{}
// ensureResponse encodes a response to an Ensure request.
type ensureResponse struct {
Created []string // paths that were created because they weren't already present
Created []string // paths that were created because they weren't already present
Noted []EnsureParentPath // preexisting paths that are parents of created items
}
// conditionalRemoveResponse encodes a response to a conditionalRemove request.
@ -341,8 +343,9 @@ func Eval(root string, directory string, _ EvalOptions) (string, error) {
// StatOptions controls parts of Stat()'s behavior.
type StatOptions struct {
CheckForArchives bool // check for and populate the IsArchive bit in returned values
Excludes []string // contents to pretend don't exist, using the OS-specific path separator
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs when returning results
CheckForArchives bool // check for and populate the IsArchive bit in returned values
Excludes []string // contents to pretend don't exist, using the OS-specific path separator
}
// Stat globs the specified pattern in the specified directory and returns its
@ -479,6 +482,7 @@ func Put(root string, directory string, options PutOptions, bulkReader io.Reader
// MkdirOptions controls parts of Mkdir()'s behavior.
type MkdirOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories
ModTimeNew *time.Time // set mtime and atime of newly-created directories
ChownNew *idtools.IDPair // set ownership of newly-created directories
ChmodNew *os.FileMode // set permissions on newly-created directories
}
@ -973,7 +977,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
resp := copierHandlerEval(req)
return resp, nil, nil
case requestStat:
resp := copierHandlerStat(req, pm)
resp := copierHandlerStat(req, pm, idMappings)
return resp, nil, nil
case requestGet:
return copierHandlerGet(bulkWriter, req, pm, idMappings)
@ -1100,7 +1104,7 @@ func copierHandlerEval(req request) *response {
return &response{Eval: evalResponse{Evaluated: filepath.Join(req.rootPrefix, resolvedTarget)}}
}
func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
func copierHandlerStat(req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
}
@ -1158,6 +1162,17 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
}
result.Size = linfo.Size()
result.Mode = linfo.Mode()
result.UID, result.GID = -1, -1
if uid, gid, err := owner(linfo); err == nil {
if idMappings != nil && !idMappings.Empty() {
hostPair := idtools.IDPair{UID: uid, GID: gid}
uid, gid, err = idMappings.ToContainer(hostPair)
if err != nil {
return errorResponse("copier: stat: mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
}
}
result.UID, result.GID = int64(uid), int64(gid)
}
result.ModTime = linfo.ModTime()
result.IsDir = linfo.IsDir()
result.IsRegular = result.Mode.IsRegular()
@ -1270,7 +1285,7 @@ func checkLinks(item string, req request, info os.FileInfo) (string, os.FileInfo
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
statRequest := req
statRequest.Request = requestStat
statResponse := copierHandlerStat(req, pm)
statResponse := copierHandlerStat(req, pm, idMappings)
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
}
@ -1589,6 +1604,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if name != "" {
hdr.Name = filepath.ToSlash(name)
}
hdr.Uname, hdr.Gname = "", ""
if options.Rename != nil {
hdr.Name = handleRename(options.Rename, hdr.Name)
}
@ -1696,6 +1712,9 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.ChmodDirs != nil {
hdr.Mode = int64(*options.ChmodDirs)
}
if !strings.HasSuffix(hdr.Name, "/") {
hdr.Name += "/"
}
} else {
if options.ChownFiles != nil {
hdr.Uid, hdr.Gid = options.ChownFiles.UID, options.ChownFiles.GID
@ -2199,6 +2218,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
}
subdir := ""
var created []string
for _, component := range strings.Split(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir)
@ -2209,6 +2229,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
if err = chmod(path, dirMode); err != nil {
return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
}
created = append(created, path)
} else {
// FreeBSD can return EISDIR for "mkdir /":
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
@ -2217,6 +2238,17 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
}
}
}
// set timestamps last, in case we needed to create some nested directories, which would
// update the timestamps on directories that we'd just set timestamps on, if we had done
// that immediately
if req.MkdirOptions.ModTimeNew != nil {
when := *req.MkdirOptions.ModTimeNew
for _, newDirectory := range created {
if err = lutimes(false, newDirectory, when, when); err != nil {
return errorResponse("copier: mkdir: error setting datestamp on %q: %v", newDirectory, err)
}
}
}
return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
}
@ -2251,16 +2283,26 @@ type EnsurePath struct {
// EnsureOptions controls parts of Ensure()'s behavior.
type EnsureOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs in the chroot
Paths []EnsurePath
}
// EnsureParentPath is a parent (or grandparent, or...) directory of an item
// created by Ensure(), along with information about it, from before the item
// in question was created. If the information about this directory hasn't
// changed when commit-time rolls around, it's most likely that this directory
// is only being considered for inclusion in the layer because it was pulled
// up, and it was not actually changed.
type EnsureParentPath = ConditionalRemovePath
// Ensure ensures that the specified mount point targets exist under the root.
// If the root directory is not specified, the current root directory is used.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the operation is performed in a chrooted
// context.
func Ensure(root, directory string, options EnsureOptions) ([]string, error) {
// Returns a slice with the pathnames of items that needed to be created and a
// slice of affected parent directories and information about them.
func Ensure(root, directory string, options EnsureOptions) ([]string, []EnsureParentPath, error) {
req := request{
Request: requestEnsure,
Root: root,
@ -2269,12 +2311,12 @@ func Ensure(root, directory string, options EnsureOptions) ([]string, error) {
}
resp, err := copier(nil, nil, req)
if err != nil {
return nil, err
return nil, nil, err
}
if resp.Error != "" {
return nil, errors.New(resp.Error)
return nil, nil, errors.New(resp.Error)
}
return resp.Ensure.Created, nil
return resp.Ensure.Created, resp.Ensure.Noted, nil
}
func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response {
@ -2283,6 +2325,7 @@ func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response
}
slices.SortFunc(req.EnsureOptions.Paths, func(a, b EnsurePath) int { return strings.Compare(a.Path, b.Path) })
var created []string
notedByName := map[string]EnsureParentPath{}
for _, item := range req.EnsureOptions.Paths {
uid, gid := 0, 0
if item.Chown != nil {
@ -2326,11 +2369,25 @@ func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response
if parentPath == "" {
parentPath = "."
}
leaf := filepath.Join(subdir, component)
leaf := filepath.Join(parentPath, component)
parentInfo, err := os.Stat(filepath.Join(req.Root, parentPath))
if err != nil {
return errorResponse("copier: ensure: checking datestamps on %q (%d: %v): %v", parentPath, i, components, err)
}
if parentPath != "." {
parentModTime := parentInfo.ModTime().UTC()
parentMode := parentInfo.Mode()
uid, gid, err := owner(parentInfo)
if err != nil {
return errorResponse("copier: ensure: error reading owner of %q: %v", parentPath, err)
}
notedByName[parentPath] = EnsureParentPath{
Path: parentPath,
ModTime: &parentModTime,
Mode: &parentMode,
Owner: &idtools.IDPair{UID: uid, GID: gid},
}
}
if i < len(components)-1 || item.Typeflag == tar.TypeDir {
err = os.Mkdir(filepath.Join(req.Root, leaf), mode)
subdir = leaf
@ -2372,7 +2429,15 @@ func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response
}
}
slices.Sort(created)
return &response{Error: "", Ensure: ensureResponse{Created: created}}
noted := make([]EnsureParentPath, 0, len(notedByName))
for _, n := range notedByName {
if slices.Contains(created, n.Path) {
continue
}
noted = append(noted, n)
}
slices.SortFunc(noted, func(a, b EnsureParentPath) int { return strings.Compare(a.Path, b.Path) })
return &response{Error: "", Ensure: ensureResponse{Created: created, Noted: noted}}
}
// ConditionalRemovePath is a single item being passed to an ConditionalRemove() call.
@ -2385,7 +2450,7 @@ type ConditionalRemovePath struct {
// ConditionalRemoveOptions controls parts of ConditionalRemove()'s behavior.
type ConditionalRemoveOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs in the chroot
Paths []ConditionalRemovePath
}

View File

@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.41.0"
Version = "1.42.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"

View File

@ -50,11 +50,14 @@ const (
// containerExcludesDir is the subdirectory of the container data
// directory where we drop exclusions
containerExcludesDir = "commit-excludes"
// containerPulledUpDir is the subdirectory of the container
// data directory where we drop exclusions when we're not squashing
containerPulledUpDir = "commit-pulled-up"
// containerExcludesSubstring is the suffix of files under
// $cdir/containerExcludesDir which should be ignored, as they only
// exist because we use CreateTemp() to create uniquely-named files,
// but we don't want to try to use their contents until after they've
// been written to
// $cdir/containerExcludesDir and $cdir/containerPulledUpDir which
// should be ignored, as they only exist because we use CreateTemp() to
// create uniquely-named files, but we don't want to try to use their
// contents until after they've been written to
containerExcludesSubstring = ".tmp"
)
@ -104,6 +107,8 @@ type containerImageRef struct {
extraImageContent map[string]string
compatSetParent types.OptionalBool
layerExclusions []copier.ConditionalRemovePath
layerMountTargets []copier.ConditionalRemovePath
layerPullUps []copier.EnsureParentPath
unsetAnnotations []string
setAnnotations []string
createdAnnotation types.OptionalBool
@ -781,7 +786,73 @@ func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
return omanifestbytes, oconfig, nil
}
func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
// filterExclusionsByImage returns a slice of the members of "exclusions" which are present in the image with the specified ID
func (i containerImageRef) filterExclusionsByImage(ctx context.Context, exclusions []copier.EnsureParentPath, imageID string) ([]copier.EnsureParentPath, error) {
if len(exclusions) == 0 || imageID == "" {
return nil, nil
}
var paths []copier.EnsureParentPath
mountPoint, err := i.store.MountImage(imageID, nil, i.mountLabel)
cleanup := func() {
if _, err := i.store.UnmountImage(imageID, false); err != nil {
logrus.Debugf("unmounting image %q: %v", imageID, err)
}
}
if err != nil && errors.Is(err, storage.ErrLayerUnknown) {
// if an imagestore is being used, this could be expected
if b, err2 := NewBuilder(ctx, i.store, BuilderOptions{
FromImage: imageID,
PullPolicy: define.PullNever,
ContainerSuffix: "tmp",
}); err2 == nil {
mountPoint, err = b.Mount(i.mountLabel)
cleanup = func() {
cid := b.ContainerID
if err := b.Delete(); err != nil {
logrus.Debugf("unmounting image %q as container %q: %v", imageID, cid, err)
}
}
}
}
if err != nil {
return nil, fmt.Errorf("mounting image %q to examine its contents: %w", imageID, err)
}
defer cleanup()
globs := make([]string, 0, len(exclusions))
for _, exclusion := range exclusions {
globs = append(globs, exclusion.Path)
}
options := copier.StatOptions{}
stats, err := copier.Stat(mountPoint, mountPoint, options, globs)
if err != nil {
return nil, fmt.Errorf("checking for potential exclusion items in image %q: %w", imageID, err)
}
for _, stat := range stats {
for _, exclusion := range exclusions {
if stat.Glob != exclusion.Path {
continue
}
for result, stat := range stat.Results {
if result != exclusion.Path {
continue
}
if exclusion.ModTime != nil && !exclusion.ModTime.Equal(stat.ModTime) {
continue
}
if exclusion.Mode != nil && *exclusion.Mode != stat.Mode {
continue
}
if exclusion.Owner != nil && (int64(exclusion.Owner.UID) != stat.UID && int64(exclusion.Owner.GID) != stat.GID) {
continue
}
paths = append(paths, exclusion)
}
}
}
return paths, nil
}
func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
// These maps will let us check if a layer ID is part of one group or another.
parentLayerIDs := make(map[string]bool)
apiLayerIDs := make(map[string]bool)
@ -945,6 +1016,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
}
var rc io.ReadCloser
var errChan chan error
var layerExclusions []copier.ConditionalRemovePath
if i.confidentialWorkload.Convert {
// Convert the root filesystem into an encrypted disk image.
rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
@ -977,6 +1049,18 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
if i.emptyLayer && layerID == i.layerID {
continue
}
if layerID == i.layerID {
// We need to filter out any mount targets that we created.
layerExclusions = append(slices.Clone(i.layerExclusions), i.layerMountTargets...)
// And we _might_ need to filter out directories that were modified
// by creating and removing mount targets, _if_ they were the
// same in the base image for this stage.
layerPullUps, err := i.filterExclusionsByImage(ctx, i.layerPullUps, i.fromImageID)
if err != nil {
return nil, fmt.Errorf("checking which exclusions are in base image %q: %w", i.fromImageID, err)
}
layerExclusions = append(layerExclusions, layerPullUps...)
}
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
@ -999,7 +1083,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// At this point, there are multiple ways that can happen.
diffBeingAltered := i.compression != archive.Uncompressed
diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil
diffBeingAltered = diffBeingAltered || len(i.layerExclusions) != 0
diffBeingAltered = diffBeingAltered || len(layerExclusions) != 0
if diffBeingAltered {
destHasher = digest.Canonical.Digester()
multiWriter = io.MultiWriter(counter, destHasher.Hash())
@ -1019,7 +1103,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// Use specified timestamps in the layer, if we're doing that for history
// entries.
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, i.layerExclusions)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, layerExclusions)
writer = writeCloser
// Okay, copy from the raw diff through the filter, compressor, and counter and
// digesters.
@ -1440,27 +1524,52 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
return nil, fmt.Errorf("getting the per-container data directory for %q: %w", b.ContainerID, err)
}
excludesFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*"))
gatherExclusions := func(excludesFiles []string) ([]copier.ConditionalRemovePath, error) {
var excludes []copier.ConditionalRemovePath
for _, excludesFile := range excludesFiles {
if strings.Contains(excludesFile, containerExcludesSubstring) {
continue
}
excludesData, err := os.ReadFile(excludesFile)
if err != nil {
return nil, fmt.Errorf("reading commit exclusions for %q: %w", b.ContainerID, err)
}
var theseExcludes []copier.ConditionalRemovePath
if err := json.Unmarshal(excludesData, &theseExcludes); err != nil {
return nil, fmt.Errorf("parsing commit exclusions for %q: %w", b.ContainerID, err)
}
excludes = append(excludes, theseExcludes...)
}
return excludes, nil
}
mountTargetFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit exclusions for %q: %w", b.ContainerID, err)
}
var layerExclusions []copier.ConditionalRemovePath
for _, excludesFile := range excludesFiles {
if strings.Contains(excludesFile, containerExcludesSubstring) {
continue
}
excludesData, err := os.ReadFile(excludesFile)
if err != nil {
return nil, fmt.Errorf("reading commit exclusions for %q: %w", b.ContainerID, err)
}
var excludes []copier.ConditionalRemovePath
if err := json.Unmarshal(excludesData, &excludes); err != nil {
return nil, fmt.Errorf("parsing commit exclusions for %q: %w", b.ContainerID, err)
}
layerExclusions = append(layerExclusions, excludes...)
pulledUpFiles, err := filepath.Glob(filepath.Join(cdir, containerPulledUpDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit pulled-up items for %q: %w", b.ContainerID, err)
}
layerMountTargets, err := gatherExclusions(mountTargetFiles)
if err != nil {
return nil, err
}
if len(layerMountTargets) > 0 {
logrus.Debugf("these items were created for use as mount targets: %#v", layerMountTargets)
}
layerPullUps, err := gatherExclusions(pulledUpFiles)
if err != nil {
return nil, err
}
if len(layerPullUps) > 0 {
logrus.Debugf("these items appear to have been pulled up: %#v", layerPullUps)
}
var layerExclusions []copier.ConditionalRemovePath
if options.CompatLayerOmissions == types.OptionalBoolTrue {
layerExclusions = append(layerExclusions, compatLayerExclusions...)
layerExclusions = slices.Clone(compatLayerExclusions)
}
if len(layerExclusions) > 0 {
logrus.Debugf("excluding these items from committed layer: %#v", layerExclusions)
}
manifestType := options.PreferredManifestType
@ -1565,6 +1674,8 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
extraImageContent: maps.Clone(options.ExtraImageContent),
compatSetParent: options.CompatSetParent,
layerExclusions: layerExclusions,
layerMountTargets: layerMountTargets,
layerPullUps: layerPullUps,
createdAnnotation: options.CreatedAnnotation,
}
if ref.created != nil {

View File

@ -862,7 +862,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
logrus.Debugf("stage %d name: %q resolves to %q", stageIndex, stageName, baseWithArg)
stageName = baseWithArg
// If --from=<index> convert index to name
if index, err := strconv.Atoi(stageName); err == nil {
if index, err := strconv.Atoi(stageName); err == nil && index >= 0 && index < stageIndex {
stageName = stages[index].Name
}
// Check if selected base is not an additional

View File

@ -467,7 +467,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
// exists and if stage short_name matches any
// additionalContext replace stage with additional
// build context.
if index, err := strconv.Atoi(from); err == nil {
if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < s.index {
from = s.stages[index].Name
}
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
@ -1155,8 +1155,9 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount
return builder.MountPoint, nil
}
// getContentSummary generates content summary for cases where we added content and need
// to get summary with updated digests.
// getContentSummary generates a description of what was most recently added to the container,
// typically in the form "file", "dir", or "multi" followed by a colon and the hex part of the
// digest of the content, for inclusion in the corresponding history entry's "createdBy" field
func (s *StageExecutor) getContentSummaryAfterAddingContent() string {
contentType, digest := s.builder.ContentDigester.Digest()
summary := contentType
@ -1297,7 +1298,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 || len(s.executor.unsetAnnotations) > 0 {
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 || len(s.executor.unsetAnnotations) > 0 || s.executor.inheritLabels == types.OptionalBoolFalse || s.executor.inheritAnnotations == types.OptionalBoolFalse {
// We either don't have a base image, or we need to
// transform the contents of the base image, or we need
// to make some changes to just the config blob. Whichever
@ -1394,7 +1395,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// also account if the index is given instead
// of name so convert index in --from=<index>
// to name.
if index, err := strconv.Atoi(from); err == nil {
if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < s.index {
from = s.stages[index].Name
}
// If additional buildContext contains this
@ -1889,13 +1890,17 @@ func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDif
return history[len(baseHistory)].CreatedBy == createdBy, nil
}
// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
// getCreatedBy returns the value to store in the history entry for the node.
// If the passed-in addedContentSummary is not an empty string, it is
// assumed to have the digest information for the content if the node is ADD or
// COPY.
//
// This function acts differently if getCreatedBy is invoked by LastStep. For instances
// certain instructions like `removing annotations` does not makes sense for every step
// but only makes sense if the step is last step of a build.
// The metadata string which is appended to the instruction may need to
// indicate that certain last-minute changes (generally things which couldn't
// be done by appending to the parsed Dockerfile, such as modifying timestamps
// in the layer, unsetting labels, or anything having to do with annotations)
// were made so that a future build won't mistake this result for a cache hit
// unless the same flags are being used at that time.
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string, isLastStep bool) (string, error) {
if node == nil {
return "/bin/sh", nil
@ -2602,33 +2607,65 @@ func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMo
return s.builder.EnsureContainerPathAs(path, user, mode)
}
func (s *StageExecutor) buildMetadata(isLastStep bool, addcopy bool) string {
// buildMetadata constructs the text at the end of the createdBy value for the
// history entry that we'll generate for the instruction that we're currently
// processing. Any flags that affect the output image in a way that affects
// whether or not it should be used as a cache hit for another build with that
// flag set differently should be reflected in its result. Some build settings
// only take affect at the final step, so only note those when they're applied.
func (s *StageExecutor) buildMetadata(isLastStep bool, isAddOrCopy bool) string {
unsetLabels := ""
inheritLabels := ""
unsetAnnotations := ""
inheritAnnotations := ""
newAnnotations := ""
layerMutations := ""
// If --inherit-label was manually set to false then update history.
if s.executor.inheritLabels == types.OptionalBoolFalse {
inheritLabels = "|inheritLabels=false"
}
// If --unsetlabel was used to clear a label, make a note of it.
for _, label := range s.executor.unsetLabels {
unsetLabels += "|unsetLabel=" + label
}
if isLastStep {
// If --unsetannotation was used to clear an annotation, make a note of it.
for _, annotation := range s.executor.unsetAnnotations {
unsetAnnotations += "|unsetAnnotation=" + annotation
}
// If --inherit-annotation was manually set to false then update history.
// If --inherit-annotation was manually set to false then we cleared the inherited annotations.
if s.executor.inheritAnnotations == types.OptionalBoolFalse {
inheritAnnotations = "|inheritAnnotations=false"
}
// If new annotations are added, they must be added as part of the last step of the build,
// so mention in history that new annotations were added inorder to make sure the builds
// can either reuse layers or burst the cache depending upon new annotations.
// so mention in history that new annotations were added in order to make sure that subsequent builds
// only use this image as a cache hit if it was built with the same set of annotations.
if len(s.executor.annotations) > 0 {
newAnnotations += strings.Join(s.executor.annotations, ",")
}
}
if addcopy {
return inheritLabels + " " + unsetAnnotations + " " + inheritAnnotations + " " + newAnnotations
// If we're messing with timestamps in layer contents, make a note of how we're doing it.
if s.executor.timestamp != nil || (s.executor.sourceDateEpoch != nil && s.executor.rewriteTimestamp) {
var t time.Time
modtype := ""
if s.executor.timestamp != nil {
t = s.executor.timestamp.UTC()
modtype = "force-mtime"
}
if s.executor.sourceDateEpoch != nil && s.executor.rewriteTimestamp {
t = s.executor.sourceDateEpoch.UTC()
modtype = "clamp-mtime"
if s.executor.timestamp != nil && s.executor.timestamp.Before(*s.executor.sourceDateEpoch) {
t = s.executor.timestamp.UTC()
modtype = "force-mtime"
}
}
layerMutations = "|" + modtype + "=" + strconv.FormatInt(t.Unix(), 10)
}
return inheritLabels + unsetAnnotations + inheritAnnotations + newAnnotations
if isAddOrCopy {
return unsetLabels + " " + inheritLabels + " " + unsetAnnotations + " " + inheritAnnotations + " " + layerMutations + " " + newAnnotations
}
return unsetLabels + inheritLabels + unsetAnnotations + inheritAnnotations + layerMutations + newAnnotations
}

View File

@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/buildah"
digest "github.com/opencontainers/go-digest"
@ -69,6 +70,11 @@ func generatePathChecksum(sourcePath string) (string, error) {
}
header.Name = filepath.ToSlash(relPath)
// Zero out timestamp fields to ignore modification time in checksum calculation
header.ModTime = time.Time{}
header.AccessTime = time.Time{}
header.ChangeTime = time.Time{}
if err := tarWriter.WriteHeader(header); err != nil {
return err
}

View File

@ -133,6 +133,8 @@ func storeInfo(store storage.Store) (map[string]any, error) {
info := map[string]any{}
info["GraphRoot"] = store.GraphRoot()
info["RunRoot"] = store.RunRoot()
info["GraphImageStore"] = store.ImageStore()
info["GraphTransientStore"] = store.TransientStore()
info["GraphDriverName"] = store.GraphDriverName()
info["GraphOptions"] = store.GraphOptions()
statusPairs, err := store.Status()
@ -144,6 +146,7 @@ func storeInfo(store storage.Store) (map[string]any, error) {
status[pair[0]] = pair[1]
}
info["GraphStatus"] = status
images, err := store.Images()
if err != nil {
logrus.Error(err, "error getting number of images")

View File

@ -355,6 +355,23 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
sbomScanOptions = append(sbomScanOptions, *sbomScanOption)
}
var compatVolumes, createdAnnotation, inheritAnnotations, inheritLabels, skipUnusedStages types.OptionalBool
if c.Flag("compat-volumes").Changed {
compatVolumes = types.NewOptionalBool(iopts.CompatVolumes)
}
if c.Flag("created-annotation").Changed {
createdAnnotation = types.NewOptionalBool(iopts.CreatedAnnotation)
}
if c.Flag("inherit-annotations").Changed {
inheritAnnotations = types.NewOptionalBool(iopts.InheritAnnotations)
}
if c.Flag("inherit-labels").Changed {
inheritLabels = types.NewOptionalBool(iopts.InheritLabels)
}
if c.Flag("skip-unused-stages").Changed {
skipUnusedStages = types.NewOptionalBool(iopts.SkipUnusedStages)
}
options = define.BuildOptions{
AddCapabilities: iopts.CapAdd,
AdditionalBuildContexts: additionalBuildContext,
@ -371,14 +388,14 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
CDIConfigDir: iopts.CDIConfigDir,
CNIConfigDir: iopts.CNIConfigDir,
CNIPluginPath: iopts.CNIPlugInPath,
CompatVolumes: types.NewOptionalBool(iopts.CompatVolumes),
CompatVolumes: compatVolumes,
ConfidentialWorkload: confidentialWorkloadOptions,
CPPFlags: iopts.CPPFlags,
CommonBuildOpts: commonOpts,
Compression: compression,
ConfigureNetwork: networkPolicy,
ContextDirectory: contextDir,
CreatedAnnotation: types.NewOptionalBool(iopts.CreatedAnnotation),
CreatedAnnotation: createdAnnotation,
Devices: iopts.Devices,
DropCapabilities: iopts.CapDrop,
Err: stderr,
@ -390,8 +407,8 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
IIDFile: iopts.Iidfile,
IgnoreFile: iopts.IgnoreFile,
In: stdin,
InheritLabels: types.NewOptionalBool(iopts.InheritLabels),
InheritAnnotations: types.NewOptionalBool(iopts.InheritAnnotations),
InheritLabels: inheritLabels,
InheritAnnotations: inheritAnnotations,
Isolation: isolation,
Jobs: &iopts.Jobs,
Labels: iopts.Label,
@ -423,7 +440,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
SBOMScanOptions: sbomScanOptions,
SignBy: iopts.SignBy,
SignaturePolicyPath: iopts.SignaturePolicy,
SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages),
SkipUnusedStages: skipUnusedStages,
SourceDateEpoch: sourceDateEpoch,
Squash: iopts.Squash,
SystemContext: systemContext,

View File

@ -281,7 +281,7 @@ always: pull base and SBOM scanner images even if the named images are present
missing: pull base and SBOM scanner images if the named images are not present in store.
never: only use images present in store if available.
newer: only pull base and SBOM scanner images when newer images exist on the registry than those in the store.`)
fs.Lookup("pull").NoOptDefVal = "missing" // treat a --pull with no argument like --pull=missing
fs.Lookup("pull").NoOptDefVal = "always" // treat a --pull with no argument like --pull=always
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
if err := fs.MarkHidden("pull-always"); err != nil {
panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))

View File

@ -527,9 +527,9 @@ func pullPolicyWithFlags(policySpec string, always, never bool) (define.PullPoli
}
policy := strings.ToLower(policySpec)
switch policy {
case "true", "missing", "ifmissing", "notpresent":
case "missing", "ifmissing", "notpresent":
return define.PullIfMissing, nil
case "always":
case "true", "always":
return define.PullAlways, nil
case "false", "never":
return define.PullNever, nil

View File

@ -452,6 +452,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
defer reapStrays()
// Set up bind mounts for things that a namespaced user might not be able to get to directly.
unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath)
@ -1081,6 +1082,23 @@ func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener,
return terminalFD, nil
}
// reapStrays collects the exit status of any child processes that were
// reparented to this process, discarding their exit codes. It polls with
// WNOHANG, pausing briefly while children are still running, and gives up
// after a bounded number of attempts.
func reapStrays() {
	// Reap the exit status of anything that was reparented to us, not that
	// we care about their exit status.
	logrus.Debugf("checking for reparented child processes")
	const maxAttempts = 100
	for attempts := 0; attempts < maxAttempts; attempts++ {
		wpid, err := unix.Wait4(-1, nil, unix.WNOHANG, nil)
		if err != nil {
			// No waitable children remain (e.g. ECHILD): stop polling.
			break
		}
		if wpid != 0 {
			logrus.Debugf("caught reparented child process %d", wpid)
			continue
		}
		// Children exist but none have exited yet; give them a moment.
		time.Sleep(100 * time.Millisecond)
	}
}
func runUsingRuntimeMain() {
var options runUsingRuntimeSubprocOptions
// Set logging.
@ -1129,6 +1147,7 @@ func runUsingRuntimeMain() {
// Run the container, start to finish.
status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR)
reapStrays()
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
@ -2119,11 +2138,12 @@ func (b *Builder) createMountTargets(spec *specs.Spec) ([]copier.ConditionalRemo
if len(targets.Paths) == 0 {
return nil, nil
}
created, err := copier.Ensure(rootfsPath, rootfsPath, targets)
created, noted, err := copier.Ensure(rootfsPath, rootfsPath, targets)
if err != nil {
return nil, err
}
logrus.Debugf("created mount targets at %v", created)
logrus.Debugf("parents of mount targets at %+v", noted)
var remove []copier.ConditionalRemovePath
for _, target := range created {
cleanedTarget := strings.Trim(path.Clean(filepath.ToSlash(target)), "/")
@ -2151,23 +2171,28 @@ func (b *Builder) createMountTargets(spec *specs.Spec) ([]copier.ConditionalRemo
if err != nil {
return nil, fmt.Errorf("finding working container bookkeeping directory: %w", err)
}
if err := os.Mkdir(filepath.Join(cdir, containerExcludesDir), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
return nil, fmt.Errorf("creating exclusions directory: %w", err)
for excludesDir, exclusions := range map[string][]copier.ConditionalRemovePath{
containerExcludesDir: remove,
containerPulledUpDir: noted,
} {
if err := os.Mkdir(filepath.Join(cdir, excludesDir), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
return nil, fmt.Errorf("creating exclusions directory: %w", err)
}
encoded, err := json.Marshal(exclusions)
if err != nil {
return nil, fmt.Errorf("encoding list of items to exclude at commit-time: %w", err)
}
f, err := os.CreateTemp(filepath.Join(cdir, excludesDir), "filter*"+containerExcludesSubstring)
if err != nil {
return nil, fmt.Errorf("creating exclusions file: %w", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := ioutils.AtomicWriteFile(strings.TrimSuffix(f.Name(), containerExcludesSubstring), encoded, 0o600); err != nil {
return nil, fmt.Errorf("writing exclusions file: %w", err)
}
}
encoded, err := json.Marshal(remove)
if err != nil {
return nil, fmt.Errorf("encoding list of items to exclude at commit-time: %w", err)
}
f, err := os.CreateTemp(filepath.Join(cdir, containerExcludesDir), "filter*"+containerExcludesSubstring)
if err != nil {
return nil, fmt.Errorf("creating exclusions file: %w", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := ioutils.AtomicWriteFile(strings.TrimSuffix(f.Name(), containerExcludesSubstring), encoded, 0o600); err != nil {
return nil, fmt.Errorf("writing exclusions file: %w", err)
}
// return that set of paths directly, in case the caller would prefer
// to clear them out before commit-time
// return the set of to-remove-now paths directly, in case the caller would prefer
// to clear them out itself now instead of waiting until commit-time
return remove, nil
}