vendor: update c/{buildah,common,image,storage} to main

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger
2025-05-28 14:53:37 +02:00
parent e98e128012
commit b551939be6
123 changed files with 2454 additions and 1568 deletions

View File

@@ -28,12 +28,12 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-41"
PRIOR_FEDORA_NAME: "fedora-40"
FEDORA_NAME: "fedora-42"
PRIOR_FEDORA_NAME: "fedora-41"
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -122,7 +122,7 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
image: docker.io/library/golang:1.23
image: docker.io/library/golang:1.23.3
cpu: 1
memory: 1
@@ -207,11 +207,10 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# Disabled until we update to f41/42 as f40 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@@ -221,11 +220,10 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
# Disabled until we update to f41/42 as f40 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@@ -268,12 +266,11 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
# PRIV_NAME: rootless
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"

View File

@@ -46,17 +46,16 @@ jobs:
notifications: &copr_build_failure_notification
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
# Fedora aliases documentation: https://packit.dev/docs/configuration#aliases
# python3-fedora-distro-aliases provides `resolve-fedora-aliases` command
targets: &fedora_copr_targets
# f40 ships go 1.22 and we require go 1.23 now. This should be reverted to fedora-all
# once either f40 is rebased to go 1.23 or f42 is released and f40 goes EOL.
- fedora-latest-stable-x86_64
- fedora-latest-stable-aarch64
- fedora-development-x86_64
- fedora-development-aarch64
- fedora-all-x86_64
- fedora-all-aarch64
enable_net: true
# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: pull_request
trigger: ignore
packages: [buildah-eln]
notifications: *copr_build_failure_notification
targets:
@@ -68,8 +67,9 @@ jobs:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
enable_net: true
# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: pull_request
trigger: ignore
packages: [buildah-centos]
notifications: *copr_build_failure_notification
targets: &centos_copr_targets
@@ -95,21 +95,20 @@ jobs:
- job: tests
trigger: pull_request
packages: [buildah-fedora]
targets: &fedora_copr_test_targets
# See the other comment above, this should be reverted to fedora-all when possible.
- fedora-latest-stable-x86_64
- fedora-development-x86_64
targets:
- fedora-all-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo
# Ignore until golang is updated in distro buildroot to 1.23.3+
# Tests on CentOS Stream for main branch PRs
- job: tests
trigger: pull_request
trigger: ignore
packages: [buildah-centos]
targets: &centos_copr_test_targets
targets:
- centos-stream-9-x86_64
- centos-stream-10-x86_64
tf_extra_params:

View File

@@ -1750,7 +1750,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
}
// make a conditional note to set this directory's permissions
// later, but not if we already had an explictly-provided mode
// later, but not if we already had an explicitly-provided mode
if _, ok := directoryModes[path]; !ok {
directoryModes[path] = defaultDirMode
}

View File

@@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.40.0"
Version = "1.41.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"

View File

@@ -148,36 +148,6 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
return expected
}
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
dmediaType = docker.V2S2MediaTypeUncompressedLayer
if layerCompression != archive.Uncompressed {
switch layerCompression {
case archive.Gzip:
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = manifest.DockerV2Schema2LayerMediaType
logrus.Debugf("compressing %s with gzip", what)
case archive.Bzip2:
// Until the image specs define a media type for bzip2-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with bzip2.
return "", "", errors.New("media type for bzip2-compressed layers is not defined")
case archive.Xz:
// Until the image specs define a media type for xz-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with xz.
return "", "", errors.New("media type for xz-compressed layers is not defined")
case archive.Zstd:
// Until the image specs define a media type for zstd-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with zstd.
return "", "", errors.New("media type for zstd-compressed layers is not defined")
default:
logrus.Debugf("compressing %s with unknown compressor(?)", what)
}
}
return omediaType, dmediaType, nil
}
// Extract the container's whole filesystem as a filesystem image, wrapped
// in LUKS-compatible encryption.
func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
@@ -304,34 +274,35 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
}), errChan, nil
}
type manifestBuilder interface {
// addLayer adds notes to the manifest and config about the layer. The layer blobs are
// identified by their possibly-compressed blob digests and sizes in the manifest, and by
// their uncompressed digests (diffIDs) in the config.
addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest)
computeLayerMIMEType(what string, layerCompression archive.Compression) error
buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error
manifestAndConfig() ([]byte, []byte, error)
}
type dockerSchema2ManifestBuilder struct {
i *containerImageRef
layerMediaType string
dimage docker.V2Image
dmanifest docker.V2S2Manifest
}
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
// without making unintended changes to the original Builder (Docker schema 2).
func (i *containerImageRef) newDockerSchema2ManifestBuilder() (manifestBuilder, error) {
created := time.Now().UTC()
if i.created != nil {
created = *i.created
}
// Build an empty image, and then decode over it.
oimage := v1.Image{}
if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
}
// Always replace this value, since we're newer than our base image.
oimage.Created = &created
// Clear the list of diffIDs, since we always repopulate it.
oimage.RootFS.Type = docker.TypeLayers
oimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
oimage.History = []v1.History{}
}
// Build an empty image, and then decode over it.
dimage := docker.V2Image{}
if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
return nil, err
}
// Set the parent, but only if we want to be compatible with "classic" docker build.
if i.compatSetParent == types.OptionalBoolTrue {
@@ -358,64 +329,405 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
// If we were supplied with a configuration, copy fields from it to
// matching fields in both formats.
if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err)
if err := config.OverrideDocker(dimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
return nil, fmt.Errorf("applying changes: %w", err)
}
// If we're producing a confidential workload, override the command and
// assorted other settings that aren't expected to work correctly.
if i.confidentialWorkload.Convert {
dimage.Config.Entrypoint = []string{"/entrypoint"}
oimage.Config.Entrypoint = []string{"/entrypoint"}
dimage.Config.Cmd = nil
oimage.Config.Cmd = nil
dimage.Config.User = ""
oimage.Config.User = ""
dimage.Config.WorkingDir = ""
oimage.Config.WorkingDir = ""
dimage.Config.Healthcheck = nil
dimage.Config.Shell = nil
dimage.Config.Volumes = nil
oimage.Config.Volumes = nil
dimage.Config.ExposedPorts = nil
}
// Return partial manifest. The Layers lists will be populated later.
return &dockerSchema2ManifestBuilder{
i: i,
layerMediaType: docker.V2S2MediaTypeUncompressedLayer,
dimage: dimage,
dmanifest: docker.V2S2Manifest{
V2Versioned: docker.V2Versioned{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
},
Config: docker.V2S2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
},
Layers: []docker.V2S2Descriptor{},
},
}, nil
}
func (mb *dockerSchema2ManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
dlayerDescriptor := docker.V2S2Descriptor{
MediaType: mb.layerMediaType,
Digest: layerBlobSum,
Size: layerBlobSize,
}
mb.dmanifest.Layers = append(mb.dmanifest.Layers, dlayerDescriptor)
// Note this layer in the list of diffIDs, again using the uncompressed digest.
mb.dimage.RootFS.DiffIDs = append(mb.dimage.RootFS.DiffIDs, diffID)
}
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func (mb *dockerSchema2ManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
dmediaType := docker.V2S2MediaTypeUncompressedLayer
if layerCompression != archive.Uncompressed {
switch layerCompression {
case archive.Gzip:
dmediaType = manifest.DockerV2Schema2LayerMediaType
logrus.Debugf("compressing %s with gzip", what)
case archive.Bzip2:
// Until the image specs define a media type for bzip2-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with bzip2.
return errors.New("media type for bzip2-compressed layers is not defined")
case archive.Xz:
// Until the image specs define a media type for xz-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with xz.
return errors.New("media type for xz-compressed layers is not defined")
case archive.Zstd:
// Until the image specs define a media type for zstd-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with zstd.
return errors.New("media type for zstd-compressed layers is not defined")
default:
logrus.Debugf("compressing %s with unknown compressor(?)", what)
}
}
mb.layerMediaType = dmediaType
return nil
}
func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
// Build history notes in the image configuration.
appendHistory := func(history []v1.History, empty bool) {
for i := range history {
var created time.Time
if history[i].Created != nil {
created = *history[i].Created
}
dnews := docker.V2S2History{
Created: created,
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: empty,
}
mb.dimage.History = append(mb.dimage.History, dnews)
}
}
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(mb.dimage.History)
// Add history entries for prepended empty layers.
appendHistory(mb.i.preEmptyLayers, true)
// Add history entries for prepended API-supplied layers.
for _, h := range mb.i.preLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Add a history entry for this layer, empty or not.
created := time.Now().UTC()
if mb.i.created != nil {
created = (*mb.i.created).UTC()
}
dnews := docker.V2S2History{
Created: created,
CreatedBy: mb.i.createdBy,
Author: mb.dimage.Author,
EmptyLayer: mb.i.emptyLayer,
Comment: mb.i.historyComment,
}
mb.dimage.History = append(mb.dimage.History, dnews)
// Add a history entry for the extra image content if we added a layer for it.
// This diff was added to the list of layers before API-supplied layers that
// needed to be appended, and we need to keep the order of history entries for
// not-empty layers consistent with that.
if extraImageContentDiff != "" {
createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
dnews := docker.V2S2History{
Created: created,
CreatedBy: createdBy,
}
mb.dimage.History = append(mb.dimage.History, dnews)
}
// Add history entries for appended empty layers.
appendHistory(mb.i.postEmptyLayers, true)
// Add history entries for appended API-supplied layers.
for _, h := range mb.i.postLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Assemble a comment indicating which base image was used, if it wasn't
// just an image ID, and add it to the first history entry we added.
var fromComment string
if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
if mb.dimage.History[baseImageHistoryLen].Comment != "" {
fromComment = " "
}
fromComment += "FROM " + mb.i.fromImageName
}
mb.dimage.History[baseImageHistoryLen].Comment += fromComment
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
// one) provided us at least one entry to use as a starting point.
if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedDockerDiffIDs(mb.dimage)
if len(mb.dimage.RootFS.DiffIDs) != expectedDiffIDs {
return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.dimage.RootFS.DiffIDs))
}
}
return nil
}
func (mb *dockerSchema2ManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
// Encode the image configuration blob.
dconfig, err := json.Marshal(&mb.dimage)
if err != nil {
return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dimage, err)
}
logrus.Debugf("Docker v2s2 config = %s", dconfig)
// Add the configuration blob to the manifest.
mb.dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
mb.dmanifest.Config.Size = int64(len(dconfig))
mb.dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
// Encode the manifest.
dmanifestbytes, err := json.Marshal(&mb.dmanifest)
if err != nil {
return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dmanifest, err)
}
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
return dmanifestbytes, dconfig, nil
}
type ociManifestBuilder struct {
i *containerImageRef
layerMediaType string
oimage v1.Image
omanifest v1.Manifest
}
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder (OCI manifest).
func (i *containerImageRef) newOCIManifestBuilder() (manifestBuilder, error) {
created := time.Now().UTC()
if i.created != nil {
created = *i.created
}
// Build an empty image, and then decode over it.
oimage := v1.Image{}
if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
return nil, err
}
// Always replace this value, since we're newer than our base image.
oimage.Created = &created
// Clear the list of diffIDs, since we always repopulate it.
oimage.RootFS.Type = docker.TypeLayers
oimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
oimage.History = []v1.History{}
}
// If we were supplied with a configuration, copy fields from it to
// matching fields in both formats.
if err := config.OverrideOCI(&oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
return nil, fmt.Errorf("applying changes: %w", err)
}
// If we're producing a confidential workload, override the command and
// assorted other settings that aren't expected to work correctly.
if i.confidentialWorkload.Convert {
oimage.Config.Entrypoint = []string{"/entrypoint"}
oimage.Config.Cmd = nil
oimage.Config.User = ""
oimage.Config.WorkingDir = ""
oimage.Config.Volumes = nil
oimage.Config.ExposedPorts = nil
}
// Build empty manifests. The Layers lists will be populated later.
omanifest := v1.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
// Return partial manifest. The Layers lists will be populated later.
return &ociManifestBuilder{
i: i,
// The default layer media type assumes no compression.
layerMediaType: v1.MediaTypeImageLayer,
oimage: oimage,
omanifest: v1.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
MediaType: v1.MediaTypeImageManifest,
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
Layers: []v1.Descriptor{},
Annotations: i.annotations,
},
MediaType: v1.MediaTypeImageManifest,
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
Layers: []v1.Descriptor{},
Annotations: i.annotations,
}, nil
}
func (mb *ociManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
olayerDescriptor := v1.Descriptor{
MediaType: mb.layerMediaType,
Digest: layerBlobSum,
Size: layerBlobSize,
}
mb.omanifest.Layers = append(mb.omanifest.Layers, olayerDescriptor)
// Note this layer in the list of diffIDs, again using the uncompressed digest.
mb.oimage.RootFS.DiffIDs = append(mb.oimage.RootFS.DiffIDs, diffID)
}
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func (mb *ociManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
omediaType := v1.MediaTypeImageLayer
if layerCompression != archive.Uncompressed {
switch layerCompression {
case archive.Gzip:
omediaType = v1.MediaTypeImageLayerGzip
logrus.Debugf("compressing %s with gzip", what)
case archive.Bzip2:
// Until the image specs define a media type for bzip2-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with bzip2.
return errors.New("media type for bzip2-compressed layers is not defined")
case archive.Xz:
// Until the image specs define a media type for xz-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with xz.
return errors.New("media type for xz-compressed layers is not defined")
case archive.Zstd:
// Until the image specs define a media type for zstd-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with zstd.
return errors.New("media type for zstd-compressed layers is not defined")
default:
logrus.Debugf("compressing %s with unknown compressor(?)", what)
}
}
mb.layerMediaType = omediaType
return nil
}
func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
// Build history notes in the image configuration.
appendHistory := func(history []v1.History, empty bool) {
for i := range history {
var created *time.Time
if history[i].Created != nil {
copiedTimestamp := *history[i].Created
created = &copiedTimestamp
}
onews := v1.History{
Created: created,
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: empty,
}
mb.oimage.History = append(mb.oimage.History, onews)
}
}
dmanifest := docker.V2S2Manifest{
V2Versioned: docker.V2Versioned{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
},
Config: docker.V2S2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
},
Layers: []docker.V2S2Descriptor{},
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(mb.oimage.History)
// Add history entries for prepended empty layers.
appendHistory(mb.i.preEmptyLayers, true)
// Add history entries for prepended API-supplied layers.
for _, h := range mb.i.preLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Add a history entry for this layer, empty or not.
created := time.Now().UTC()
if mb.i.created != nil {
created = (*mb.i.created).UTC()
}
onews := v1.History{
Created: &created,
CreatedBy: mb.i.createdBy,
Author: mb.oimage.Author,
EmptyLayer: mb.i.emptyLayer,
Comment: mb.i.historyComment,
}
mb.oimage.History = append(mb.oimage.History, onews)
// Add a history entry for the extra image content if we added a layer for it.
// This diff was added to the list of layers before API-supplied layers that
// needed to be appended, and we need to keep the order of history entries for
// not-empty layers consistent with that.
if extraImageContentDiff != "" {
createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
onews := v1.History{
Created: &created,
CreatedBy: createdBy,
}
mb.oimage.History = append(mb.oimage.History, onews)
}
// Add history entries for appended empty layers.
appendHistory(mb.i.postEmptyLayers, true)
// Add history entries for appended API-supplied layers.
for _, h := range mb.i.postLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
return oimage, omanifest, dimage, dmanifest, nil
// Assemble a comment indicating which base image was used, if it wasn't
// just an image ID, and add it to the first history entry we added.
var fromComment string
if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
if mb.oimage.History[baseImageHistoryLen].Comment != "" {
fromComment = " "
}
fromComment += "FROM " + mb.i.fromImageName
}
mb.oimage.History[baseImageHistoryLen].Comment += fromComment
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
// one) provided us at least one entry to use as a starting point.
if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedOCIDiffIDs(mb.oimage)
if len(mb.oimage.RootFS.DiffIDs) != expectedDiffIDs {
return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.oimage.RootFS.DiffIDs))
}
}
return nil
}
func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
// Encode the image configuration blob.
oconfig, err := json.Marshal(&mb.oimage)
if err != nil {
return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.oimage, err)
}
logrus.Debugf("OCIv1 config = %s", oconfig)
// Add the configuration blob to the manifest.
mb.omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
mb.omanifest.Config.Size = int64(len(oconfig))
mb.omanifest.Config.MediaType = v1.MediaTypeImageConfig
// Encode the manifest.
omanifestbytes, err := json.Marshal(&mb.omanifest)
if err != nil {
return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.omanifest, err)
}
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
return omanifestbytes, oconfig, nil
}
func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
// Decide which type of manifest and configuration output we're going to provide.
manifestType := i.preferredManifestType
// If it's not a format we support, return an error.
if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
}
// These maps will let us check if a layer ID is part of one group or another.
parentLayerIDs := make(map[string]bool)
apiLayerIDs := make(map[string]bool)
@@ -492,9 +804,21 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// Build fresh copies of the configurations and manifest so that we don't mess with any
// values in the Builder object itself.
oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
if err != nil {
return nil, err
var mb manifestBuilder
switch i.preferredManifestType {
case v1.MediaTypeImageManifest:
mb, err = i.newOCIManifestBuilder()
if err != nil {
return nil, err
}
case manifest.DockerV2Schema2MediaType:
mb, err = i.newDockerSchema2ManifestBuilder()
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
i.preferredManifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
}
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
@@ -512,9 +836,6 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
if apiLayerIDs[layerID] {
what = layerID
}
// The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
// Look up this layer.
var layerUncompressedDigest digest.Digest
var layerUncompressedSize int64
@@ -552,21 +873,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
layerBlobSize := layerUncompressedSize
diffID := layerUncompressedDigest
// Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
Digest: layerBlobSum,
Size: layerBlobSize,
}
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
dlayerDescriptor := docker.V2S2Descriptor{
MediaType: dmediaType,
Digest: layerBlobSum,
Size: layerBlobSize,
}
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
// Note this layer in the list of diffIDs, again using the uncompressed digest.
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
mb.addLayer(layerBlobSum, layerBlobSize, diffID)
blobLayers[diffID] = blobLayerInfo{
ID: layerID,
Size: layerBlobSize,
@@ -574,8 +881,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
continue
}
// Figure out if we need to change the media type, in case we've changed the compression.
omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
if err != nil {
if err := mb.computeLayerMIMEType(what, i.compression); err != nil {
return nil, err
}
// Start reading either the layer or the whole container rootfs.
@@ -651,35 +957,30 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
return nil, fmt.Errorf("compressing %s: %w", what, err)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
{
// Use specified timestamps in the layer, if we're doing that for history
// entries.
if i.created != nil {
// Tweak the contents of layers we're creating.
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
// Scrub any local user names that might correspond to UIDs or GIDs of
// files in this layer.
hdr.Uname, hdr.Gname = "", ""
// Use specified timestamps in the layer, if we're doing that for history
// entries.
if i.created != nil {
// Changing a zeroed field to a non-zero field can affect the
// format that the library uses for writing the header, so only
// change fields that are already set to avoid changing the
// format (and as a result, changing the length) of the header
// that we write.
if !hdr.ModTime.IsZero() {
hdr.ModTime = *i.created
}
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = *i.created
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = *i.created
}
return false, false, nil
// Changing a zeroed field to a non-zero field can affect the
// format that the library uses for writing the header, so only
// change fields that are already set to avoid changing the
// format (and as a result, changing the length) of the header
// that we write.
if !hdr.ModTime.IsZero() {
hdr.ModTime = *i.created
}
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = *i.created
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = *i.created
}
return false, false, nil
})
writer = io.Writer(writeCloser)
writer = writeCloser
}
// Okay, copy from the raw diff through the filter, compressor, and counter and
// digesters.
@@ -718,189 +1019,19 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
Digest: destHasher.Digest(),
Size: size,
}
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
dlayerDescriptor := docker.V2S2Descriptor{
MediaType: dmediaType,
Digest: destHasher.Digest(),
Size: size,
}
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
// Add a note about the diffID, which is always the layer's uncompressed digest.
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
}
// Build history notes in the image configurations.
appendHistory := func(history []v1.History, empty bool) {
for i := range history {
var created *time.Time
if history[i].Created != nil {
copiedTimestamp := *history[i].Created
created = &copiedTimestamp
}
onews := v1.History{
Created: created,
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: empty,
}
oimage.History = append(oimage.History, onews)
if created == nil {
created = &time.Time{}
}
dnews := docker.V2S2History{
Created: *created,
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: empty,
}
dimage.History = append(dimage.History, dnews)
}
mb.addLayer(destHasher.Digest(), size, srcHasher.Digest())
}
// Only attempt to append history if history was not disabled explicitly.
if !i.omitHistory {
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(oimage.History)
// Add history entries for prepended empty layers.
appendHistory(i.preEmptyLayers, true)
// Add history entries for prepended API-supplied layers.
for _, h := range i.preLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Add a history entry for this layer, empty or not.
created := time.Now().UTC()
if i.created != nil {
created = (*i.created).UTC()
}
onews := v1.History{
Created: &created,
CreatedBy: i.createdBy,
Author: oimage.Author,
EmptyLayer: i.emptyLayer,
Comment: i.historyComment,
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
Created: created,
CreatedBy: i.createdBy,
Author: dimage.Author,
EmptyLayer: i.emptyLayer,
Comment: i.historyComment,
}
dimage.History = append(dimage.History, dnews)
// Add a history entry for the extra image content if we added a layer for it.
// This diff was added to the list of layers before API-supplied layers that
// needed to be appended, and we need to keep the order of history entries for
// not-empty layers consistent with that.
if extraImageContentDiff != "" {
createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
onews := v1.History{
Created: &created,
CreatedBy: createdBy,
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
Created: created,
CreatedBy: createdBy,
}
dimage.History = append(dimage.History, dnews)
}
// Add history entries for appended empty layers.
appendHistory(i.postEmptyLayers, true)
// Add history entries for appended API-supplied layers.
for _, h := range i.postLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Assemble a comment indicating which base image was used, if it wasn't
// just an image ID, and add it to the first history entry we added.
var fromComment string
if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != "" && !strings.HasPrefix(i.fromImageID, i.fromImageName) {
if oimage.History[baseImageHistoryLen].Comment != "" {
fromComment = " "
}
fromComment += "FROM " + i.fromImageName
}
oimage.History[baseImageHistoryLen].Comment += fromComment
dimage.History[baseImageHistoryLen].Comment += fromComment
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
// one) provided us at least one entry to use as a starting point.
if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedOCIDiffIDs(oimage)
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
}
expectedDiffIDs = expectedDockerDiffIDs(dimage)
if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
}
if err := mb.buildHistory(extraImageContentDiff, extraImageContentDiffDigest); err != nil {
return nil, err
}
}
// Encode the image configuration blob.
oconfig, err := json.Marshal(&oimage)
imageManifest, config, err := mb.manifestAndConfig()
if err != nil {
return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
}
logrus.Debugf("OCIv1 config = %s", oconfig)
// Add the configuration blob to the manifest.
omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
omanifest.Config.Size = int64(len(oconfig))
omanifest.Config.MediaType = v1.MediaTypeImageConfig
// Encode the manifest.
omanifestbytes, err := json.Marshal(&omanifest)
if err != nil {
return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
}
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
// Encode the image configuration blob.
dconfig, err := json.Marshal(&dimage)
if err != nil {
return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
}
logrus.Debugf("Docker v2s2 config = %s", dconfig)
// Add the configuration blob to the manifest.
dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
dmanifest.Config.Size = int64(len(dconfig))
dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
// Encode the manifest.
dmanifestbytes, err := json.Marshal(&dmanifest)
if err != nil {
return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
}
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
// Decide which manifest and configuration blobs we'll actually output.
var config []byte
var imageManifest []byte
switch manifestType {
case v1.MediaTypeImageManifest:
imageManifest = omanifestbytes
config = oconfig
case manifest.DockerV2Schema2MediaType:
imageManifest = dmanifestbytes
config = dconfig
default:
panic("unreachable code: unsupported manifest type")
return nil, err
}
src = &containerImageSource{
path: path,
@@ -914,7 +1045,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
config: config,
configDigest: digest.Canonical.FromBytes(config),
manifest: imageManifest,
manifestType: manifestType,
manifestType: i.preferredManifestType,
blobDirectory: i.blobDirectory,
blobLayers: blobLayers,
}

View File

@@ -2243,9 +2243,11 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen
return nil, "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
}
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// intermediateImageExists returns image ID if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
// If more than one image matches as a potential candidate, then priority is given to the most recently built image.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
cacheCandidates := []storage.Image{}
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
@@ -2316,9 +2318,13 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
return "", err
}
if foundMatch {
return image.ID, nil
cacheCandidates = append(cacheCandidates, image)
}
}
if len(cacheCandidates) > 0 {
slices.SortFunc(cacheCandidates, func(a, b storage.Image) int { return a.Created.Compare(b.Created) })
return cacheCandidates[len(cacheCandidates)-1].ID, nil
}
return "", nil
}

View File

@@ -65,74 +65,120 @@ func mergeEnv(a, b []string) []string {
return results
}
// Override takes a buildah docker config and an OCI ImageConfig, and applies a
// parseOverrideChanges applies a list of Dockerfile-style change
// instructions on top of an optional starting configuration and returns
// the resulting configuration.  When there are no changes to apply, the
// starting configuration is returned as-is (it may be nil).
func parseOverrideChanges(overrideChanges []string, overrideConfig *manifest.Schema2Config) (*manifest.Schema2Config, error) {
	if len(overrideChanges) == 0 {
		return overrideConfig, nil
	}
	if overrideConfig == nil {
		overrideConfig = &manifest.Schema2Config{}
	}
	// Treat the change list as the contents of a Dockerfile.
	changes := strings.Join(overrideChanges, "\n")
	parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes))
	if err != nil {
		return overrideConfig, fmt.Errorf("parsing change set %+v: %w", changes, err)
	}
	// A throwaway builder evaluates the configuration-only instructions,
	// seeded with the incoming configuration converted to a RunConfig.
	builder := imagebuilder.NewBuilder(nil)
	builder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig)
	for _, child := range parsed.Children {
		var step imagebuilder.Step
		if err := step.Resolve(child); err != nil {
			return overrideConfig, fmt.Errorf("resolving change %q: %w", child.Original, err)
		}
		if err := builder.Run(&step, &configOnlyExecutor{}, true); err != nil {
			return overrideConfig, fmt.Errorf("processing change %q: %w", child.Original, err)
		}
	}
	// Convert the builder's resulting RunConfig back into a Schema2Config.
	return Schema2ConfigFromGoDockerclientConfig(&builder.RunConfig), nil
}
// OverrideOCI takes an OCI ImageConfig and applies a
// mixture of a slice of Dockerfile-style instructions and fields from a config
// blob to it
func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
if len(overrideChanges) > 0 {
if overrideConfig == nil {
overrideConfig = &manifest.Schema2Config{}
}
// Parse the set of changes as we would a Dockerfile.
changes := strings.Join(overrideChanges, "\n")
parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes))
if err != nil {
return fmt.Errorf("parsing change set %+v: %w", changes, err)
}
// Create a dummy builder object to process configuration-related
// instructions.
subBuilder := imagebuilder.NewBuilder(nil)
// Convert the incoming data into an initial RunConfig.
subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig)
// Process the change instructions one by one.
for _, node := range parsed.Children {
var step imagebuilder.Step
if err := step.Resolve(node); err != nil {
return fmt.Errorf("resolving change %q: %w", node.Original, err)
}
if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil {
return fmt.Errorf("processing change %q: %w", node.Original, err)
}
}
// Pull settings out of the dummy builder's RunConfig.
overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig)
func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig)
if err != nil {
return err
}
if overrideConfig != nil {
// Apply changes from a possibly-provided possibly-changed config struct.
dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User)
dconfig.AttachStdin = overrideConfig.AttachStdin
dconfig.AttachStdout = overrideConfig.AttachStdout
dconfig.AttachStderr = overrideConfig.AttachStderr
if len(overrideConfig.ExposedPorts) > 0 {
dexposedPorts := make(map[docker.Port]struct{})
oexposedPorts := make(map[string]struct{})
for port := range dconfig.ExposedPorts {
dexposedPorts[port] = struct{}{}
}
for port := range overrideConfig.ExposedPorts {
dexposedPorts[docker.Port(port)] = struct{}{}
}
for port := range oconfig.ExposedPorts {
oexposedPorts[port] = struct{}{}
}
for port := range overrideConfig.ExposedPorts {
oexposedPorts[string(port)] = struct{}{}
}
dconfig.ExposedPorts = dexposedPorts
oconfig.ExposedPorts = oexposedPorts
}
if len(overrideConfig.Env) > 0 {
oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
}
oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
if len(overrideConfig.Volumes) > 0 {
if oconfig.Volumes == nil {
oconfig.Volumes = make(map[string]struct{})
}
for volume := range overrideConfig.Volumes {
oconfig.Volumes[volume] = struct{}{}
}
}
oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
if len(overrideConfig.Labels) > 0 {
if oconfig.Labels == nil {
oconfig.Labels = make(map[string]string)
}
for k, v := range overrideConfig.Labels {
oconfig.Labels[k] = v
}
}
oconfig.StopSignal = overrideConfig.StopSignal
}
return nil
}
// OverrideDocker takes a Docker config and applies a
// mixture of a slice of Dockerfile-style instructions and fields from a config
// blob to it
func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig)
if err != nil {
return err
}
if overrideConfig != nil {
// Apply changes from a possibly-provided possibly-changed config struct.
dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
dconfig.AttachStdin = overrideConfig.AttachStdin
dconfig.AttachStdout = overrideConfig.AttachStdout
dconfig.AttachStderr = overrideConfig.AttachStderr
if len(overrideConfig.ExposedPorts) > 0 {
dexposedPorts := make(map[docker.Port]struct{})
for port := range dconfig.ExposedPorts {
dexposedPorts[port] = struct{}{}
}
for port := range overrideConfig.ExposedPorts {
dexposedPorts[docker.Port(port)] = struct{}{}
}
dconfig.ExposedPorts = dexposedPorts
}
dconfig.Tty = overrideConfig.Tty
dconfig.OpenStdin = overrideConfig.OpenStdin
dconfig.StdinOnce = overrideConfig.StdinOnce
if len(overrideConfig.Env) > 0 {
dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env)
oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
}
dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd)
oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
if overrideConfig.Healthcheck != nil {
dconfig.Healthcheck = &docker.HealthConfig{
Test: slices.Clone(overrideConfig.Healthcheck.Test),
@@ -148,16 +194,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
if dconfig.Volumes == nil {
dconfig.Volumes = make(map[string]struct{})
}
if oconfig.Volumes == nil {
oconfig.Volumes = make(map[string]struct{})
}
for volume := range overrideConfig.Volumes {
dconfig.Volumes[volume] = struct{}{}
oconfig.Volumes[volume] = struct{}{}
}
}
dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir)
oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
dconfig.NetworkDisabled = overrideConfig.NetworkDisabled
dconfig.MacAddress = overrideConfig.MacAddress
dconfig.OnBuild = overrideConfig.OnBuild
@@ -165,16 +206,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
if dconfig.Labels == nil {
dconfig.Labels = make(map[string]string)
}
if oconfig.Labels == nil {
oconfig.Labels = make(map[string]string)
}
for k, v := range overrideConfig.Labels {
dconfig.Labels[k] = v
oconfig.Labels[k] = v
}
}
dconfig.StopSignal = overrideConfig.StopSignal
oconfig.StopSignal = overrideConfig.StopSignal
dconfig.StopTimeout = overrideConfig.StopTimeout
dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell)
}

View File

@@ -98,7 +98,7 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
return nil
}
func SetHas(m map[string]struct{}, k string) bool {
// SetHas reports whether the key k is present in the map m.
func SetHas[K comparable, V any](m map[K]V, k K) bool {
	if _, found := m[k]; found {
		return true
	}
	return false
}

View File

@@ -35,6 +35,7 @@ import (
securejoin "github.com/cyphar/filepath-securejoin"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/openshift/imagebuilder"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -81,6 +82,25 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
}
// currentLabelOpts builds security options that pin the SELinux user and
// role to those of the current process label.  It returns nil when the
// process has no SELinux label.
func currentLabelOpts() ([]string, error) {
	processLabel, err := selinux.CurrentLabel()
	if err != nil {
		return nil, err
	}
	if processLabel == "" {
		// Not running with a label; nothing to pin.
		return nil, nil
	}
	ctx, err := selinux.NewContext(processLabel)
	if err != nil {
		return nil, err
	}
	opts := []string{
		fmt.Sprintf("label=user:%s", ctx["user"]),
		fmt.Sprintf("label=role:%s", ctx["role"]),
	}
	return opts, nil
}
// CommonBuildOptionsFromFlagSet parses the build options from the bud cli
func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
var (
@@ -201,6 +221,18 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
OCIHooksDir: ociHooks,
}
securityOpts, _ := flags.GetStringArray("security-opt")
defConfig, err := config.Default()
if err != nil {
return nil, fmt.Errorf("failed to get container config: %w", err)
}
if defConfig.Containers.EnableLabeledUsers {
defSecurityOpts, err := currentLabelOpts()
if err != nil {
return nil, err
}
securityOpts = append(defSecurityOpts, securityOpts...)
}
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
return nil, err
}

View File

@@ -8,6 +8,7 @@ import (
"path/filepath"
"github.com/containers/buildah/define"
"github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/devices"
)
@@ -47,7 +48,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
}
for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path))
d.Permissions = devices.Permissions(permissions)
d.Permissions = config.Permissions(permissions)
device := define.BuildahDevice{Device: *d, Source: src, Destination: dst}
devs = append(devs, device)
}