Mirror of https://github.com/containers/podman.git (synced 2025-06-26 04:46:57 +08:00)

vendor: bump buildah to v1.32.1-0.20231012130144-244170240d85
Signed-off-by: Aditya R <arajan@redhat.com>

go.mod (4 changed lines)

@@ -12,7 +12,7 @@ require (
 	github.com/container-orchestrated-devices/container-device-interface v0.6.1
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.3.0
-	github.com/containers/buildah v1.32.0
+	github.com/containers/buildah v1.32.1-0.20231012130144-244170240d85
 	github.com/containers/common v0.56.1-0.20231010150003-09776aa73db7
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.7.1
@@ -92,7 +92,7 @@ require (
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f // indirect
+	github.com/containers/luksy v0.0.0-20230912175440-6df88cb7f0dd // indirect
 	github.com/coreos/go-oidc/v3 v3.6.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect

go.sum (8 changed lines)

@@ -249,8 +249,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
 github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
-github.com/containers/buildah v1.32.0 h1:uz5Rcf7lGeStj7iPTBgO4UdhQYZqMMzyt9suDf16k1k=
-github.com/containers/buildah v1.32.0/go.mod h1:sN3rA3DbnqekNz3bNdkqWduuirYDuMs54LUCOZOomBE=
+github.com/containers/buildah v1.32.1-0.20231012130144-244170240d85 h1:Jn3o/XIIbyQqpnzwsALj5qnyr03dfxLmviMiDlBKRyU=
+github.com/containers/buildah v1.32.1-0.20231012130144-244170240d85/go.mod h1:4XtQQpvO6e3qFDzYak6E/KLZLc/Erq02Yr0J9K+SIA4=
 github.com/containers/common v0.56.1-0.20231010150003-09776aa73db7 h1:Gx9i5pM2uXoIL3+QDuS3ddko+vGBCoRfisHchQV4K0g=
 github.com/containers/common v0.56.1-0.20231010150003-09776aa73db7/go.mod h1:UoUXLn51o0628B8h4MOdWGKYfS/y0e9mjizyfERMoes=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
@@ -263,8 +263,8 @@ github.com/containers/libhvee v0.4.1-0.20231012183749-e51be96b4854 h1:9pHtBDAO1Z
 github.com/containers/libhvee v0.4.1-0.20231012183749-e51be96b4854/go.mod h1:3lTcwI2g7qe8Ekgk9hdDxQeT9KrqXPilQvxJfIJp8TQ=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f h1:/HjLNYkVoUJNT4mm2dzGl63x7nD6YHxxI/k1kR0TkzA=
-github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f/go.mod h1:hEjwW0sePqkTahMzbzeDsQEXN2zdF2VAccqSj5vb1NY=
+github.com/containers/luksy v0.0.0-20230912175440-6df88cb7f0dd h1:NbQ782+jynau+ySnK8qBGyLstgiaLOAjoJWrwSLovGc=
+github.com/containers/luksy v0.0.0-20230912175440-6df88cb7f0dd/go.mod h1:p3x2uBi+Eaqor7MXSnXIoSGmIaocAlRnd3UiEl6AtgQ=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=

vendor/github.com/containers/buildah/.gitignore (2 changed lines; generated, vendored)

@@ -10,3 +10,5 @@ Dockerfile*
 !/tests/conformance/**/Dockerfile*
 *.swp
 /result/
+internal/mkcw/embed/entrypoint.o
+internal/mkcw/embed/entrypoint

vendor/github.com/containers/buildah/.packit.yaml (7 changed lines; generated, vendored)

@@ -11,7 +11,9 @@ srpm_build_deps:
 jobs:
   - job: copr_build
     trigger: pull_request
-    # keep in sync with https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next
+    notifications:
+      failure_comment:
+        message: "Ephemeral COPR build failed. @containers/packit-build please check."
     enable_net: true
     targets:
       - fedora-all-x86_64
@@ -28,6 +30,9 @@ jobs:
   # Run on commit to main branch
   - job: copr_build
     trigger: commit
+    notifications:
+      failure_comment:
+        message: "podman-next COPR build failed. @containers/packit-build please check."
     owner: rhcontainerbot
     project: podman-next
     enable_net: true

vendor/github.com/containers/buildah/add.go (8 changed lines; generated, vendored)

@@ -456,9 +456,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
 	// Iterate through every item that matched the glob.
 	itemsCopied := 0
 	for _, glob := range localSourceStat.Globbed {
-		rel, err := filepath.Rel(contextDir, glob)
-		if err != nil {
-			return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
+		rel := glob
+		if filepath.IsAbs(glob) {
+			if rel, err = filepath.Rel(contextDir, glob); err != nil {
+				return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
+			}
 		}
 		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
 			return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
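
The hunk above changes how Add() normalizes glob matches: only absolute paths are rewritten relative to the build context, while already-relative matches are kept as-is. A minimal, standalone sketch of the same idea (not taken from buildah itself; the helper name and example paths are made up for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// relToContext mirrors the amended logic: absolute matches are rewritten
// relative to the build-context directory, relative values are left alone,
// and anything escaping the context is rejected.
func relToContext(contextDir, glob string) (string, error) {
	rel := glob
	if filepath.IsAbs(glob) {
		var err error
		if rel, err = filepath.Rel(contextDir, glob); err != nil {
			return "", fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
		}
	}
	if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", fmt.Errorf("%q is outside of %q", glob, contextDir)
	}
	return rel, nil
}

func main() {
	fmt.Println(relToContext("/build/ctx", "/build/ctx/app/main.go")) // app/main.go <nil>
	fmt.Println(relToContext("/build/ctx", "docs/README.md"))         // docs/README.md <nil>
	fmt.Println(relToContext("/build/ctx", "/etc/passwd"))            // rejected: resolves outside the context
}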

vendor/github.com/containers/buildah/define/build.go (2 changed lines; generated, vendored)

@@ -318,6 +318,8 @@ type BuildOptions struct {
 	AllPlatforms bool
 	// UnsetEnvs is a list of environments to not add to final image.
 	UnsetEnvs []string
+	// UnsetLabels is a list of labels to not add to final image from base image.
+	UnsetLabels []string
 	// Envs is a list of environment variables to set in the final image.
 	Envs []string
 	// OSFeatures specifies operating system features the image requires.
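
For callers that drive builds through the Go API rather than the CLI, the new field sits next to UnsetEnvs in define.BuildOptions. A minimal, hypothetical sketch (the label and environment names are placeholders, and a real build would pass these options on to the imagebuildah executor):

package main

import (
	"fmt"

	"github.com/containers/buildah/define"
)

func main() {
	// UnsetLabels mirrors UnsetEnvs: labels inherited from the base image
	// that should not appear in the final image.
	options := define.BuildOptions{
		UnsetEnvs:   []string{"http_proxy"},
		UnsetLabels: []string{"maintainer"},
	}
	fmt.Println(options.UnsetLabels)
}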

vendor/github.com/containers/buildah/define/types.go (2 changed lines; generated, vendored)

@@ -29,7 +29,7 @@ const (
 	// identify working containers.
 	Package = "buildah"
 	// Version for the Package. Also used by .packit.sh for Packit builds.
-	Version = "1.32.0"
+	Version = "1.33.0-dev"

 	// DefaultRuntime if containers.conf fails.
 	DefaultRuntime = "runc"

vendor/github.com/containers/buildah/image.go (3 changed lines; generated, vendored)

@@ -17,6 +17,7 @@ import (
 	"github.com/containers/buildah/define"
 	"github.com/containers/buildah/docker"
 	"github.com/containers/buildah/internal/mkcw"
+	"github.com/containers/buildah/internal/tmpdir"
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/image"
 	"github.com/containers/image/v5/manifest"
@@ -374,7 +375,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 	logrus.Debugf("layer list: %q", layers)

 	// Make a temporary directory to hold blobs.
-	path, err := os.MkdirTemp(os.TempDir(), define.Package)
+	path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
 	if err != nil {
 		return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
 	}
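
The second hunk swaps os.TempDir() for buildah's internal tmpdir helper when staging layer blobs. A rough, hypothetical stand-in for what that helper presumably does (the real tmpdir.GetTempDir lives in an internal package and likely also consults containers.conf; this simplified version only honors TMPDIR and is purely illustrative):

package main

import (
	"fmt"
	"os"
)

// getTempDir is a simplified stand-in for buildah's internal tmpdir.GetTempDir:
// prefer an explicit TMPDIR override, otherwise fall back to a scratch
// location that is usually roomier than the default /tmp.
func getTempDir() string {
	if dir := os.Getenv("TMPDIR"); dir != "" {
		return dir
	}
	return "/var/tmp"
}

func main() {
	path, err := os.MkdirTemp(getTempDir(), "buildah")
	if err != nil {
		fmt.Fprintln(os.Stderr, "creating temporary directory:", err)
		os.Exit(1)
	}
	defer os.RemoveAll(path)
	fmt.Println("layer blobs would be staged under:", path)
}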

vendor/github.com/containers/buildah/imagebuildah/executor.go (47 changed lines; generated, vendored)

@@ -142,6 +142,7 @@ type Executor struct {
 	sshsources map[string]*sshagent.Source
 	logPrefix string
 	unsetEnvs []string
+	unsetLabels []string
 	processLabel string // Shares processLabel of first stage container with containers of other stages in same build
 	mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
 	buildOutput string // Specifies instructions for any custom build output
@@ -300,6 +301,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
 		sshsources: sshsources,
 		logPrefix: logPrefix,
 		unsetEnvs: append([]string{}, options.UnsetEnvs...),
+		unsetLabels: append([]string{}, options.UnsetLabels...),
 		buildOutput: options.BuildOutput,
 		osVersion: options.OSVersion,
 		osFeatures: append([]string{}, options.OSFeatures...),
@@ -468,14 +470,14 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID
 	return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
 }

-func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
+func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) {
 	stage := stages[stageIndex]
 	ib := stage.Builder
 	node := stage.Node
 	base, err := ib.From(node)
 	if err != nil {
 		logrus.Debugf("buildStage(node.Children=%#v)", node.Children)
-		return "", nil, err
+		return "", nil, false, err
 	}

 	// If this is the last stage, then the image that we produce at
@@ -506,7 +508,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
 		if len(labelLine) > 0 {
 			additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n"))
 			if err != nil {
-				return "", nil, fmt.Errorf("while adding additional LABEL step: %w", err)
+				return "", nil, false, fmt.Errorf("while adding additional LABEL step: %w", err)
 			}
 			stage.Node.Children = append(stage.Node.Children, additionalNode.Children...)
 		}
@@ -525,13 +527,13 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
 				value := env[1]
 				envLine += fmt.Sprintf(" %q=%q", key, value)
 			} else {
-				return "", nil, fmt.Errorf("BUG: unresolved environment variable: %q", key)
+				return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key)
 			}
 		}
 		if len(envLine) > 0 {
 			additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("ENV" + envLine + "\n"))
 			if err != nil {
-				return "", nil, fmt.Errorf("while adding additional ENV step: %w", err)
+				return "", nil, false, fmt.Errorf("while adding additional ENV step: %w", err)
 			}
 			// make this the first instruction in the stage after its FROM instruction
 			stage.Node.Children = append(additionalNode.Children, stage.Node.Children...)
@@ -572,8 +574,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
 	}

 	// Build this stage.
-	if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil {
-		return "", nil, err
+	if imageID, ref, onlyBaseImage, err = stageExecutor.Execute(ctx, base); err != nil {
+		return "", nil, onlyBaseImage, err
 	}

 	// The stage succeeded, so remove its build container if we're
@@ -586,7 +588,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
 		b.stagesLock.Unlock()
 	}

-	return imageID, ref, nil
+	return imageID, ref, onlyBaseImage, nil
 }

 type stageDependencyInfo struct {
@@ -878,10 +880,11 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 	b.warnOnUnsetBuildArgs(stages, dependencyMap, b.args)

 	type Result struct {
 		Index int
 		ImageID string
-		Ref reference.Canonical
-		Error error
+		OnlyBaseImage bool
+		Ref reference.Canonical
+		Error error
 	}

 	ch := make(chan Result, len(stages))
@@ -941,21 +944,23 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 					return
 				}
 			}
-			stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
+			stageID, stageRef, stageOnlyBaseImage, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
 			if stageErr != nil {
 				cancel = true
 				ch <- Result{
 					Index: index,
 					Error: stageErr,
+					OnlyBaseImage: stageOnlyBaseImage,
 				}
 				return
 			}

 			ch <- Result{
 				Index: index,
 				ImageID: stageID,
 				Ref: stageRef,
-				Error: nil,
+				OnlyBaseImage: stageOnlyBaseImage,
+				Error: nil,
 			}
 		}()
 	}
@@ -985,7 +990,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 			// We're not populating the cache with intermediate
 			// images, so add this one to the list of images that
 			// we'll remove later.
-			if !b.layers {
+			// Only remove intermediate image is `--layers` is not provided
+			// or following stage was not only a base image ( i.e a different image ).
+			if !b.layers && !r.OnlyBaseImage {
 				cleanupImages = append(cleanupImages, r.ImageID)
 			}
 		}
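
The thread of changes above adds an OnlyBaseImage flag to each stage's Result so that a stage consisting of nothing but a FROM line does not get its base image deleted as an "intermediate" image. A small, self-contained sketch of that cleanup decision (simplified; the real executor carries much more state):

package main

import "fmt"

// stageResult is a trimmed-down stand-in for the executor's per-stage Result.
type stageResult struct {
	ImageID       string
	OnlyBaseImage bool
}

// shouldCleanup mirrors the amended condition: intermediate images are only
// removed when --layers is off and the stage actually produced a new image
// rather than just reusing its base.
func shouldCleanup(layersEnabled bool, r stageResult) bool {
	return !layersEnabled && !r.OnlyBaseImage
}

func main() {
	fmt.Println(shouldCleanup(false, stageResult{ImageID: "sha256:aaa", OnlyBaseImage: true}))  // false: keep, it is the base image
	fmt.Println(shouldCleanup(false, stageResult{ImageID: "sha256:bbb", OnlyBaseImage: false})) // true: safe to remove later
}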

vendor/github.com/containers/buildah/imagebuildah/stage_executor.go (121 changed lines; generated, vendored)

@@ -349,11 +349,6 @@ func (s *StageExecutor) volumeCacheRestore() error {
 func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
 	s.builder.ContentDigester.Restart()
 	for _, copy := range copies {
-		if copy.Download {
-			logrus.Debugf("ADD %#v, %#v", excludes, copy)
-		} else {
-			logrus.Debugf("COPY %#v, %#v", excludes, copy)
-		}
 		if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
 			return err
 		}
@@ -413,6 +408,16 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 				} else {
 					contextDir = additionalBuildContext.DownloadedCache
 				}
+			} else {
+				// This points to a path on the filesystem
+				// Check to see if there's a .containerignore
+				// file, update excludes for this stage before
+				// proceeding
+				buildContextExcludes, _, err := parse.ContainerIgnoreFile(additionalBuildContext.Value, "", nil)
+				if err != nil {
+					return err
+				}
+				excludes = append(excludes, buildContextExcludes...)
 			}
 		} else {
 			copy.From = additionalBuildContext.Value
@@ -447,6 +452,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			stripSetuid = true // did this change between 18.06 and 19.03?
 			stripSetgid = true // did this change between 18.06 and 19.03?
 		}
+		if copy.Download {
+			logrus.Debugf("ADD %#v, %#v", excludes, copy)
+		} else {
+			logrus.Debugf("COPY %#v, %#v", excludes, copy)
+		}
 		for _, src := range copy.Src {
 			if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
 				// Source is a URL, allowed for ADD but not COPY.
@@ -822,7 +832,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		}
 	}
 	dImage := docker.Image{
-		Parent: builder.FromImage,
+		Parent: builder.FromImageID,
 		ContainerConfig: dConfig,
 		Container: builder.Container,
 		Author: builder.Maintainer(),
@@ -905,13 +915,14 @@ func (s *StageExecutor) getContentSummaryAfterAddingContent() string {
 }

 // Execute runs each of the steps in the stage's parsed tree, in turn.
-func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
+func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, onlyBaseImg bool, err error) {
 	var resourceUsage rusage.Rusage
 	stage := s.stage
 	ib := stage.Builder
 	checkForLayers := s.executor.layers && s.executor.useCache
 	moreStages := s.index < len(s.stages)-1
 	lastStage := !moreStages
+	onlyBaseImage := false
 	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
 	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])

@@ -924,7 +935,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	// either in local storage, or one that we have to pull from a
 	// registry, subject to the passed-in pull policy.
 	if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil {
-		return "", nil, err
+		return "", nil, false, err
 	}
 	pullPolicy := s.executor.pullPolicy
 	s.executor.stagesLock.Lock()
@@ -954,7 +965,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	// Start counting resource usage before we potentially pull a base image.
 	if rusage.Supported() {
 		if resourceUsage, err = rusage.Get(); err != nil {
-			return "", nil, err
+			return "", nil, false, err
 		}
 		// Log the final incremental resource usage counter before we return.
 		defer logRusage()
@@ -964,7 +975,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	// the imagebuilder configuration may alter the list of steps we have,
 	// so take a snapshot of them *after* that.
 	if _, err := s.prepare(ctx, base, true, true, preserveBaseImageAnnotationsAtStageStart, pullPolicy); err != nil {
-		return "", nil, err
+		return "", nil, false, err
 	}
 	children := stage.Node.Children

@@ -1022,7 +1033,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
 		buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
 		if err != nil {
-			return "", nil, fmt.Errorf("failed to parse build output: %w", err)
+			return "", nil, false, fmt.Errorf("failed to parse build output: %w", err)
 		}
 	}

@@ -1033,13 +1044,19 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		// squash the contents of the base image. Whichever is
 		// the case, we need to commit() to create a new image.
 		logCommit(s.output, -1)
-		if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash, lastStage); err != nil {
-			return "", nil, fmt.Errorf("committing base container: %w", err)
+		emptyLayer := false
+		if s.builder.FromImageID == "" {
+			// No base image means there's nothing to put in a
+			// layer, so don't create one.
+			emptyLayer = true
+		}
+		if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash, lastStage); err != nil {
+			return "", nil, false, fmt.Errorf("committing base container: %w", err)
 		}
 		// Generate build output if needed.
 		if canGenerateBuildOutput {
 			if err := s.generateBuildOutput(buildOutputOption); err != nil {
-				return "", nil, err
+				return "", nil, false, err
 			}
 		}
 	} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
@@ -1047,12 +1064,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		// via the command line, so we need to commit.
 		logCommit(s.output, -1)
 		if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash, lastStage); err != nil {
-			return "", nil, err
+			return "", nil, false, err
 		}
 		// Generate build output if needed.
 		if canGenerateBuildOutput {
 			if err := s.generateBuildOutput(buildOutputOption); err != nil {
-				return "", nil, err
+				return "", nil, false, err
 			}
 		}
 	} else {
@@ -1061,8 +1078,9 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		// options, so just reuse the base image.
 		logCommit(s.output, -1)
 		if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
-			return "", nil, err
+			return "", nil, onlyBaseImage, err
 		}
+		onlyBaseImage = true
 		// If we have reached this point then our build is just performing a tag
 		// and it contains no steps or instructions (i.e Containerfile only contains
 		// `FROM <imagename> and nothing else so we will never end up committing this
@@ -1070,7 +1088,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		// specified honor that and export the contents of the current build anyways.
 		if canGenerateBuildOutput {
 			if err := s.generateBuildOutput(buildOutputOption); err != nil {
-				return "", nil, err
+				return "", nil, onlyBaseImage, err
 			}
 		}
 	}
@@ -1084,7 +1102,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		// Resolve any arguments in this instruction.
 		step := ib.Step()
 		if err := step.Resolve(node); err != nil {
-			return "", nil, fmt.Errorf("resolving step %+v: %w", *node, err)
+			return "", nil, false, fmt.Errorf("resolving step %+v: %w", *node, err)
 		}
 		logrus.Debugf("Parsed Step: %+v", *step)
 		if !s.executor.quiet {
@@ -1097,21 +1115,21 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			command := strings.ToUpper(step.Command)
 			// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
 			if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
-				return "", nil, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
+				return "", nil, false, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
 			}
 			if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
-				return "", nil, fmt.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
+				return "", nil, false, fmt.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
 			}
 			if strings.Contains(flag, "--from") && command == "COPY" {
 				arr := strings.Split(flag, "=")
 				if len(arr) != 2 {
-					return "", nil, fmt.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
+					return "", nil, false, fmt.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
 				}
 				// If arr[1] has an argument within it, resolve it to its
 				// value. Otherwise just return the value found.
 				from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
 				if fromErr != nil {
-					return "", nil, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr)
+					return "", nil, false, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr)
 				}

 				// Before looking into additional context
@@ -1134,7 +1152,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 					// replace with image set in build context
 					from = additionalBuildContext.Value
 					if _, err := s.getImageRootfs(ctx, from); err != nil {
-						return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+						return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
 					}
 					break
 				}
@@ -1144,12 +1162,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				// result of an earlier stage, wait for that
 				// stage to finish being built.
 				if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 				if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
 					break
 				} else if _, err = s.getImageRootfs(ctx, from); err != nil {
-					return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+					return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
 				}
 				break
 			}
@@ -1171,7 +1189,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			err := ib.Run(step, s, noRunsRemaining)
 			if err != nil {
 				logrus.Debugf("Error building at step %+v: %v", *step, err)
-				return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+				return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
 			}
 			// In case we added content, retrieve its digest.
 			addedContentSummary := s.getContentSummaryAfterAddingContent()
@@ -1196,13 +1214,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				logCommit(s.output, i)
 				imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction)
 				if err != nil {
-					return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
+					return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
 				}
 				logImageID(imgID)
 				// Generate build output if needed.
 				if canGenerateBuildOutput {
 					if err := s.generateBuildOutput(buildOutputOption); err != nil {
-						return "", nil, err
+						return "", nil, false, err
 					}
 				}
 			} else {
@@ -1234,7 +1252,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			for _, a := range node.Flags {
 				arg, err := imagebuilder.ProcessWord(a, s.stage.Builder.Arguments())
 				if err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 				switch {
 				case strings.HasPrefix(arg, "--mount="):
@@ -1246,7 +1264,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 					}
 					stageMountPoints, err := s.runStageMountPoints(mounts)
 					if err != nil {
-						return "", nil, err
+						return "", nil, false, err
 					}
 					for _, mountPoint := range stageMountPoints {
 						if mountPoint.DidExecute {
@@ -1268,7 +1286,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			if needsCacheKey {
 				cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 				if err != nil {
-					return "", nil, fmt.Errorf("failed while generating cache key: %w", err)
+					return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
 				}
 			}
 			// Check if there's already an image based on our parent that
@@ -1288,7 +1306,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				s.didExecute = true
 				if err = ib.Run(step, s, noRunsRemaining); err != nil {
 					logrus.Debugf("Error building at step %+v: %v", *step, err)
-					return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+					return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
 				}
 				// Retrieve the digest info for the content that we just copied
 				// into the rootfs.
@@ -1297,13 +1315,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				if needsCacheKey {
 					cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 					if err != nil {
-						return "", nil, fmt.Errorf("failed while generating cache key: %w", err)
+						return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
 					}
 				}
 			}
 			cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 			if err != nil {
-				return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+				return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
 			}
 			// All the best effort to find cache on localstorage have failed try pulling
 			// cache from remote repo if `--cache-from` was configured.
@@ -1315,7 +1333,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				logCachePulled(cacheKey, ref)
 				cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 				if err != nil {
-					return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+					return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
 				}
 				if cacheID != "" {
 					pulledAndUsedCacheImage = true
@@ -1335,7 +1353,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			s.didExecute = true
 			if err = ib.Run(step, s, noRunsRemaining); err != nil {
 				logrus.Debugf("Error building at step %+v: %v", *step, err)
-				return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+				return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
 			}

 			// In case we added content, retrieve its digest.
@@ -1344,7 +1362,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			if needsCacheKey {
 				cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 				if err != nil {
-					return "", nil, fmt.Errorf("failed while generating cache key: %w", err)
+					return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
 				}
 			}

@@ -1353,7 +1371,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			if checkForLayers && !avoidLookingCache {
 				cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 				if err != nil {
-					return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+					return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
 				}
 				// All the best effort to find cache on localstorage have failed try pulling
 				// cache from remote repo if `--cache-from` was configured and cacheKey was
@@ -1366,7 +1384,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 					logCachePulled(cacheKey, ref)
 					cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 					if err != nil {
-						return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+						return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
 					}
 					if cacheID != "" {
 						pulledAndUsedCacheImage = true
@@ -1390,7 +1408,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				err := ib.Run(step, s, noRunsRemaining)
 				if err != nil {
 					logrus.Debugf("Error building at step %+v: %v", *step, err)
-					return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+					return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
 				}
 			}
 		}
@@ -1407,7 +1425,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			if commitName != "" {
 				logCommit(commitName, i)
 				if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 			}
 		} else {
@@ -1423,12 +1441,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			// can be part of build-cache.
 			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false, lastStage && lastInstruction)
 			if err != nil {
-				return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
+				return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
 			}
 			// Generate build output if needed.
 			if canGenerateBuildOutput {
 				if err := s.generateBuildOutput(buildOutputOption); err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 			}
 		}
@@ -1446,7 +1464,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		if len(s.executor.cacheTo) != 0 && (!pulledAndUsedCacheImage || cacheID == "") && needsCacheKey {
 			logCachePush(cacheKey)
 			if err = s.pushCache(ctx, imgID, cacheKey); err != nil {
-				return "", nil, err
+				return "", nil, false, err
 			}
 		}

@@ -1457,12 +1475,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			// is the last instruction of the last stage.
 			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction)
 			if err != nil {
-				return "", nil, fmt.Errorf("committing final squash step %+v: %w", *step, err)
+				return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err)
 			}
 			// Generate build output if needed.
 			if canGenerateBuildOutput {
 				if err := s.generateBuildOutput(buildOutputOption); err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 			}
 		} else if cacheID != "" {
@@ -1477,7 +1495,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			// Generate build output if needed.
 			if canGenerateBuildOutput {
 				if err := s.generateBuildOutput(buildOutputOption); err != nil {
-					return "", nil, err
+					return "", nil, false, err
 				}
 			}
 		}
@@ -1508,11 +1526,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			// ID that we really should not be pulling anymore (see
 			// containers/podman/issues/10307).
 			if _, err := s.prepare(ctx, imgID, false, true, true, define.PullNever); err != nil {
-				return "", nil, fmt.Errorf("preparing container for next step: %w", err)
+				return "", nil, false, fmt.Errorf("preparing container for next step: %w", err)
 			}
 		}
 	}
-	return imgID, ref, nil
+	return imgID, ref, onlyBaseImage, nil
 }

 func historyEntriesEqual(base, derived v1.History) bool {
@@ -2036,6 +2054,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 	if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue {
 		s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
 	}
+	for _, key := range s.executor.unsetLabels {
+		s.builder.UnsetLabel(key)
+	}
 	for _, annotationSpec := range s.executor.annotations {
 		annotation := strings.SplitN(annotationSpec, "=", 2)
 		if len(annotation) > 1 {
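
At commit time the stage executor now walks executor.unsetLabels and calls Builder.UnsetLabel for each key. The effect on the committed image's label set can be pictured with this simplified, hypothetical model (buildah's real implementation mutates the working container's configuration rather than a plain map, and the sample label keys are only illustrative):

package main

import "fmt"

// applyUnsetLabels models what the loop above achieves: start from the labels
// inherited from the base image and drop every key requested via --unsetlabel.
func applyUnsetLabels(inherited map[string]string, unset []string) map[string]string {
	result := make(map[string]string, len(inherited))
	for k, v := range inherited {
		result[k] = v
	}
	for _, key := range unset {
		delete(result, key)
	}
	return result
}

func main() {
	base := map[string]string{
		"maintainer":         "someone@example.com",
		"io.buildah.version": "1.32.0",
	}
	fmt.Println(applyUnsetLabels(base, []string{"maintainer"})) // map[io.buildah.version:1.32.0]
}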

vendor/github.com/containers/buildah/internal/mkcw/types/workload.go (2 changed lines; generated, vendored)

@@ -25,7 +25,7 @@ type SevWorkloadData struct {
 // SnpWorkloadData contains the required CPU generation name.
 // https://github.com/virtee/oci2cw/blob/1502d5be33c2fa82d49aaa95781bbab2aa932781/examples/tee-config-snp.json
 type SnpWorkloadData struct {
-	Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo=4)
+	Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo/siena=4, turin=5)
 }

 const (

vendor/github.com/containers/buildah/internal/mkcw/workload.go (2 changed lines; generated, vendored)

@@ -35,7 +35,7 @@ const (
 	// SNP is a known trusted execution environment type: AMD-SNP
 	SNP = define.SNP
-	// krun looks for its configuration JSON directly in a disk image if the last twelve bytes
+	// of the disk image are this magic value followed by a little-endian 64-bit
 	// length-of-the-configuration
 	krunMagic = "KRUN"
 )

vendor/github.com/containers/buildah/pkg/cli/build.go (1 changed line; generated, vendored)

@@ -425,6 +425,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
 		Timestamp: timestamp,
 		TransientMounts: iopts.Volumes,
 		UnsetEnvs: iopts.UnsetEnvs,
+		UnsetLabels: iopts.UnsetLabels,
 	}
 	if iopts.Quiet {
 		options.ReportWriter = io.Discard

vendor/github.com/containers/buildah/pkg/cli/common.go (generated, vendored, 3 changes)
@@ -104,6 +104,7 @@ type BudResults struct {
 	LogRusage bool
 	RusageLogFile string
 	UnsetEnvs []string
+	UnsetLabels []string
 	Envs []string
 	OSFeatures []string
 	OSVersion string
@@ -283,6 +284,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
 	fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
 	fs.String("variant", "", "override the `variant` of the specified image")
 	fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image")
+	fs.StringSliceVar(&flags.UnsetLabels, "unsetlabel", nil, "unset label when inheriting labels from base image")
 	return fs
 }
 
@@ -328,6 +330,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
 	flagCompletion["target"] = commonComp.AutocompleteNone
 	flagCompletion["timestamp"] = commonComp.AutocompleteNone
 	flagCompletion["unsetenv"] = commonComp.AutocompleteNone
+	flagCompletion["unsetlabel"] = commonComp.AutocompleteNone
 	flagCompletion["variant"] = commonComp.AutocompleteNone
 	return flagCompletion
 }
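
The three common.go hunks above wire a repeatable --unsetlabel option into BudResults, GetBudFlags, and shell completion. Below is a self-contained, illustrative sketch of the same spf13/pflag pattern; it is not buildah's code, and the flag set name and example values are hypothetical.

// Illustrative sketch only.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var unsetLabels []string
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	fs.StringSliceVar(&unsetLabels, "unsetlabel", nil, "unset label when inheriting labels from base image")

	// e.g. "--unsetlabel maintainer --unsetlabel org.opencontainers.image.url"
	_ = fs.Parse([]string{"--unsetlabel", "maintainer", "--unsetlabel", "org.opencontainers.image.url"})

	// Buildah carries the collected values through BuildOptions (UnsetLabels)
	// and eventually calls builder.UnsetLabel(key) for each entry, as in the
	// stage executor hunk earlier in this diff.
	for _, key := range unsetLabels {
		fmt.Println("would unset label:", key)
	}
}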

vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go (generated, vendored, 3 changes)
@@ -11,6 +11,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containers/buildah/internal/tmpdir"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/crypto/ssh"
@@ -79,7 +80,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	serveDir, err := os.MkdirTemp("", ".buildah-ssh-sock")
+	serveDir, err := os.MkdirTemp(tmpdir.GetTempDir(), ".buildah-ssh-sock")
 	if err != nil {
 		return "", err
 	}

vendor/github.com/containers/buildah/run_common.go (generated, vendored, 6 changes)
@@ -1509,8 +1509,6 @@ func checkIfMountDestinationPreExists(root string, dest string) (bool, error) {
 //
 // If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??)
 func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]specs.Mount, *runMountArtifacts, error) {
-	// If `type` is not set default to TypeBind
-	mountType := define.TypeBind
 	mountTargets := make([]string, 0, 10)
 	tmpFiles := make([]string, 0, len(mounts))
 	mountImages := make([]string, 0, 10)
@@ -1532,6 +1530,10 @@ func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources
 		var agent *sshagent.AgentServer
 		var tl *lockfile.LockFile
 		tokens := strings.Split(mount, ",")
+
+		// If `type` is not set default to TypeBind
+		mountType := define.TypeBind
+
 		for _, field := range tokens {
 			if strings.HasPrefix(field, "type=") {
 				kv := strings.Split(field, "=")

vendor/github.com/containers/buildah/run_freebsd.go (generated, vendored, 3 changes)
@@ -16,6 +16,7 @@ import (
 	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/define"
 	"github.com/containers/buildah/internal"
+	"github.com/containers/buildah/internal/tmpdir"
 	"github.com/containers/buildah/pkg/jail"
 	"github.com/containers/buildah/pkg/overlay"
 	"github.com/containers/buildah/pkg/parse"
@@ -72,7 +73,7 @@ func setChildProcess() error {
 }
 
 func (b *Builder) Run(command []string, options RunOptions) error {
-	p, err := os.MkdirTemp("", Package)
+	p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
 	if err != nil {
 		return err
 	}

vendor/github.com/containers/buildah/run_linux.go (generated, vendored, 7 changes)
@@ -19,6 +19,7 @@ import (
 	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/define"
 	"github.com/containers/buildah/internal"
+	"github.com/containers/buildah/internal/tmpdir"
 	"github.com/containers/buildah/internal/volumes"
 	"github.com/containers/buildah/pkg/overlay"
 	"github.com/containers/buildah/pkg/parse"
@@ -71,7 +72,7 @@ func setChildProcess() error {
 
 // Run runs the specified command in the container's root filesystem.
 func (b *Builder) Run(command []string, options RunOptions) error {
-	p, err := os.MkdirTemp("", define.Package)
+	p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
 	if err != nil {
 		return err
 	}
@@ -499,7 +500,7 @@ func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options [
 		Mask: res.Subnet.Mask,
 	}}
 	netStatus := map[string]nettypes.StatusBlock{
-		slirp4netns.BinaryName: nettypes.StatusBlock{
+		slirp4netns.BinaryName: {
 			Interfaces: map[string]nettypes.NetInterface{
 				"tap0": {
 					Subnets: []nettypes.NetAddress{{IPNet: subnet}},
@@ -541,7 +542,7 @@ func setupPasta(config *config.Config, netns string, options []string) (func(),
 		Mask: net.IPv4Mask(255, 255, 255, 0),
 	}}
 	netStatus := map[string]nettypes.StatusBlock{
-		slirp4netns.BinaryName: nettypes.StatusBlock{
+		slirp4netns.BinaryName: {
 			Interfaces: map[string]nettypes.NetInterface{
 				"tap0": {
 					Subnets: []nettypes.NetAddress{{IPNet: subnet}},

vendor/github.com/containers/buildah/selinux_tag.sh (generated, vendored, 4 changes)
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-if pkg-config libselinux 2> /dev/null ; then
-	echo selinux
-fi

vendor/github.com/containers/luksy/.cirrus.yml (generated, vendored, 3 changes)
@@ -3,13 +3,14 @@ docker_builder:
   env:
     HOME: /root
     DEBIAN_FRONTEND: noninteractive
+    CIRRUS_LOG_TIMESTAMP: true
   setup_script: |
     apt-get -q update
     apt-get -q install -y bats cryptsetup golang
     go version
     make
   unit_test_script:
-    go test -v -cover
+    go test -timeout 45m -v -cover
   defaults_script: |
     bats -f defaults ./tests
   aes_script: |

vendor/github.com/containers/luksy/Makefile (generated, vendored, 6 changes)
@@ -4,11 +4,11 @@ BATS = bats
 all: luksy
 
 luksy: cmd/luksy/*.go *.go
-	$(GO) build -o luksy ./cmd/luksy
+	$(GO) build -o luksy$(shell go env GOEXE) ./cmd/luksy
 
 clean:
-	$(RM) luksy luksy.test
+	$(RM) luksy$(shell go env GOEXE) luksy.test
 
 test:
-	$(GO) test
+	$(GO) test -timeout 45m -v -cover
 	$(BATS) ./tests

vendor/github.com/containers/luksy/OWNERS (generated, vendored, new file, 4 changes)
@@ -0,0 +1,4 @@
+approvers:
+  - nalind
+reviewers:
+  - nalind

vendor/github.com/containers/luksy/decrypt.go (generated, vendored, 27 changes)
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"strconv"
 
@@ -11,14 +12,23 @@ import (
 	"golang.org/x/crypto/pbkdf2"
 )
 
+// ReaderAtSeekCloser is a combination of io.ReaderAt, io.Seeker, and io.Closer,
+// which is all we really need from an encrypted file.
+type ReaderAtSeekCloser interface {
+	io.ReaderAt
+	io.Seeker
+	io.Closer
+}
+
 // Decrypt attempts to verify the specified password using information from the
 // header and read from the specified file.
 //
 // Returns a function which will decrypt payload blocks in succession, the size
 // of chunks of data that the function expects, the offset in the file where
-// the payload begins, and the size of the payload.
-func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, error), int, int64, int64, error) {
-	st, err := f.Stat()
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V1Header) Decrypt(password string, f ReaderAtSeekCloser) (func([]byte) ([]byte, error), int, int64, int64, error) {
+	size, err := f.Seek(0, io.SeekEnd)
 	if err != nil {
 		return nil, -1, -1, -1, err
 	}
@@ -70,7 +80,7 @@ func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, er
 		}
 		if bytes.Equal(mkcandidateDerived, h.MKDigest()) {
 			payloadOffset := int64(h.PayloadOffset() * V1SectorSize)
-			return decryptStream, V1SectorSize, payloadOffset, st.Size() - payloadOffset, nil
+			return decryptStream, V1SectorSize, payloadOffset, size - payloadOffset, nil
 		}
 	}
 	if activeKeys == 0 {
@@ -84,8 +94,9 @@ func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, er
 //
 // Returns a function which will decrypt payload blocks in succession, the size
 // of chunks of data that the function expects, the offset in the file where
-// the payload begins, and the size of the payload.
-func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
+// the payload begins, and the size of the payload, assuming the payload runs
+// to the end of the file.
+func (h V2Header) Decrypt(password string, f ReaderAtSeekCloser, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
 	foundDigests := 0
 	for d, digest := range j.Digests {
 		if digest.Type != "pbkdf2" {
@@ -117,11 +128,11 @@ func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) (
 			}
 			payloadOffset = tmp
 			if segment.Size == "dynamic" {
-				st, err := f.Stat()
+				size, err := f.Seek(0, io.SeekEnd)
 				if err != nil {
 					continue
 				}
-				payloadSize = st.Size() - payloadOffset
+				payloadSize = size - payloadOffset
 			} else {
 				payloadSize, err = strconv.ParseInt(segment.Size, 10, 64)
 				if err != nil {
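
The decrypt.go changes above replace the concrete *os.File parameter with the new ReaderAtSeekCloser interface and derive the payload size by seeking to the end of the file instead of calling Stat. A small illustrative sketch of that pattern follows; the helper name payloadSize and the file name are hypothetical, not part of luksy.

// Illustrative sketch only.
package main

import (
	"fmt"
	"io"
	"os"
)

// Mirror of the interface added in decrypt.go above.
type ReaderAtSeekCloser interface {
	io.ReaderAt
	io.Seeker
	io.Closer
}

// payloadSize derives the remaining payload length the same way the new
// Decrypt code does: the offset returned by seeking to the end is the size.
func payloadSize(f ReaderAtSeekCloser, payloadOffset int64) (int64, error) {
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return -1, err
	}
	return size - payloadOffset, nil
}

func main() {
	f, err := os.Open("image.luks") // hypothetical encrypted file
	if err != nil {
		return
	}
	defer f.Close()
	if n, err := payloadSize(f, 4096); err == nil {
		fmt.Println("payload bytes:", n)
	}
}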

vendor/github.com/containers/luksy/encrypt.go (generated, vendored, 4 changes)
@@ -246,8 +246,8 @@ func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte,
 		return nil, nil, -1, errors.New("internal error")
 	}
 	iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher)
-	timeCost := 1
-	threadsCost := 4
+	timeCost := 16
+	threadsCost := 16
 	memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost)
 	priority := V2JSONKeyslotPriorityNormal
 	var stripes [][]byte

vendor/github.com/containers/luksy/encryption.go (generated, vendored, 149 changes)
@@ -417,9 +417,22 @@ func roundUpToMultiple(i, factor int) int {
 	if i < 0 {
 		return 0
 	}
+	if factor < 1 {
+		return i
+	}
 	return i + ((factor - (i % factor)) % factor)
 }
 
+func roundDownToMultiple(i, factor int) int {
+	if i < 0 {
+		return 0
+	}
+	if factor < 1 {
+		return i
+	}
+	return i - (i % factor)
+}
+
 func hasherByName(name string) (func() hash.Hash, error) {
 	switch name {
 	case "sha1":
@@ -436,13 +449,39 @@ func hasherByName(name string) (func() hash.Hash, error) {
 }
 
 type wrapper struct {
 	fn func(plaintext []byte) ([]byte, error)
 	blockSize int
 	buf []byte
-	buffered, consumed int
-	reader io.Reader
-	eof bool
-	writer io.Writer
+	buffered int
+	processed int
+	reader io.Reader
+	eof bool
+	writer io.Writer
+}
+
+func (w *wrapper) partialWrite() error {
+	if w.buffered-w.processed >= w.blockSize {
+		toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
+		processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
+		if err != nil {
+			return err
+		}
+		nProcessed := copy(w.buf[w.processed:], processed)
+		w.processed += nProcessed
+	}
+	if w.processed >= w.blockSize {
+		nWritten, err := w.writer.Write(w.buf[:w.processed])
+		if err != nil {
+			return err
+		}
+		copy(w.buf, w.buf[nWritten:w.buffered])
+		w.buffered -= nWritten
+		w.processed -= nWritten
+		if w.processed != 0 {
+			return fmt.Errorf("short write: %d != %d", nWritten, nWritten+w.processed)
+		}
+	}
+	return nil
 }
 
 func (w *wrapper) Write(buf []byte) (int, error) {
@@ -451,19 +490,8 @@ func (w *wrapper) Write(buf []byte) (int, error) {
 		nBuffered := copy(w.buf[w.buffered:], buf[n:])
 		w.buffered += nBuffered
 		n += nBuffered
-		if w.buffered == len(w.buf) {
-			processed, err := w.fn(w.buf)
-			if err != nil {
-				return n, err
-			}
-			nWritten, err := w.writer.Write(processed)
-			if err != nil {
-				return n, err
-			}
-			w.buffered -= nWritten
-			if nWritten != len(processed) {
-				return n, fmt.Errorf("short write: %d != %d", nWritten, len(processed))
-			}
+		if err := w.partialWrite(); err != nil {
+			return n, err
 		}
 	}
 	return n, nil
@@ -472,66 +500,73 @@ func (w *wrapper) Write(buf []byte) (int, error) {
 func (w *wrapper) Read(buf []byte) (int, error) {
 	n := 0
 	for n < len(buf) {
-		nRead := copy(buf[n:], w.buf[w.consumed:])
-		w.consumed += nRead
-		n += nRead
-		if w.consumed == len(w.buf) && !w.eof {
-			nRead, err := w.reader.Read(w.buf)
-			w.eof = errors.Is(err, io.EOF)
-			if err != nil && !w.eof {
-				return n, err
+		if !w.eof {
+			nRead, err := w.reader.Read(w.buf[w.buffered:])
+			if err != nil {
+				if !errors.Is(err, io.EOF) {
+					w.buffered += nRead
+					return n, err
+				}
+				w.eof = true
 			}
-			if nRead != len(w.buf) && !w.eof {
-				return n, fmt.Errorf("short read: %d != %d", nRead, len(w.buf))
-			}
-			processed, err := w.fn(w.buf[:nRead])
+			w.buffered += nRead
+		}
+		if w.buffered == 0 && w.eof {
+			return n, io.EOF
+		}
+		if w.buffered-w.processed >= w.blockSize {
+			toProcess := roundDownToMultiple(w.buffered-w.processed, w.blockSize)
+			processed, err := w.fn(w.buf[w.processed : w.processed+toProcess])
 			if err != nil {
 				return n, err
 			}
-			w.buf = processed
-			w.consumed = 0
+			nProcessed := copy(w.buf[w.processed:], processed)
+			w.processed += nProcessed
+		}
+		nRead := copy(buf[n:], w.buf[:w.processed])
+		n += nRead
+		copy(w.buf, w.buf[nRead:w.buffered])
+		w.processed -= nRead
+		w.buffered -= nRead
+		if w.buffered-w.processed < w.blockSize {
+			break
 		}
 	}
-	var eof error
-	if w.consumed == len(w.buf) && w.eof {
-		eof = io.EOF
-	}
-	return n, eof
+	return n, nil
 }
 
 func (w *wrapper) Close() error {
 	if w.writer != nil {
 		if w.buffered%w.blockSize != 0 {
-			w.buffered += copy(w.buf[w.buffered:], make([]byte, roundUpToMultiple(w.buffered%w.blockSize, w.blockSize)))
+			nPadding := w.blockSize - w.buffered%w.blockSize
+			nWritten, err := w.Write(make([]byte, nPadding))
+			if err != nil {
+				return fmt.Errorf("flushing write: %v", err)
+			}
+			if nWritten < nPadding {
+				return fmt.Errorf("flushing write: %d != %d", nPadding, nWritten)
+			}
 		}
-		processed, err := w.fn(w.buf[:w.buffered])
-		if err != nil {
-			return err
-		}
-		nWritten, err := w.writer.Write(processed)
-		if err != nil {
-			return err
-		}
-		if nWritten != len(processed) {
-			return fmt.Errorf("short write: %d != %d", nWritten, len(processed))
-		}
-		w.buffered = 0
 	}
 	return nil
 }
 
 // EncryptWriter creates an io.WriteCloser which buffers writes through an
-// encryption function. After writing a final block, the returned writer
-// should be closed.
+// encryption function, transforming and writing multiples of the blockSize.
+// After writing a final block, the returned writer should be closed.
+// If only a partial block has been written when Close() is called, a final
+// block with its length padded with zero bytes will be transformed and
+// written.
 func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser {
 	bufferSize := roundUpToMultiple(1024*1024, blockSize)
 	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer}
}
 
 // DecryptReader creates an io.ReadCloser which buffers reads through a
-// decryption function. When data will no longer be read, the returned reader
-// should be closed.
+// decryption function, decrypting and returning multiples of the blockSize
+// until it reaches the end of the file. When data will no longer be read, the
+// returned reader should be closed.
 func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser {
 	bufferSize := roundUpToMultiple(1024*1024, blockSize)
-	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), consumed: bufferSize, reader: reader}
+	return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), reader: reader}
 }
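
The encryption.go rewrite above makes the wrapper buffer and transform whole multiples of blockSize (partialWrite, roundDownToMultiple) and pads the final partial block with zero bytes on Close. The usage sketch below wires EncryptWriter and DecryptReader (whose signatures appear in the hunk) to a toy XOR transform standing in for the real per-block cipher callbacks; the import path and package name luksy are assumed from the vendored module path.

// Usage sketch only, under the assumptions stated above.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/luksy"
)

func main() {
	const blockSize = 512

	// Toy transform with the callback signature EncryptWriter/DecryptReader expect.
	xor := func(b []byte) ([]byte, error) {
		out := make([]byte, len(b))
		for i := range b {
			out[i] = b[i] ^ 0xAA
		}
		return out, nil
	}

	// Writes are buffered and transformed in blockSize multiples; Close pads
	// and flushes any trailing partial block.
	var sealed bytes.Buffer
	w := luksy.EncryptWriter(xor, &sealed, blockSize)
	if _, err := w.Write([]byte("hello, world")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Reads come back in blockSize multiples until EOF.
	r := luksy.DecryptReader(xor, &sealed, blockSize)
	defer r.Close()
	plain, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	// The round trip includes the zero padding added by Close.
	fmt.Printf("%q\n", bytes.TrimRight(plain, "\x00"))
}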

vendor/github.com/containers/luksy/luks.go (generated, vendored, 4 changes)
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"os"
+	"io"
 )
 
 // ReadHeaderOptions can control some of what ReadHeaders() does.
@@ -13,7 +13,7 @@ type ReadHeaderOptions struct{}
 // ReadHeaders reads LUKS headers from the specified file, returning either a
 // LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on
 // which format is detected.
-func ReadHeaders(f *os.File, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
+func ReadHeaders(f io.ReaderAt, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
 	var v1 V1Header
 	var v2a, v2b V2Header
 	n, err := f.ReadAt(v2a[:], 0)

vendor/modules.txt (vendored, 4 changes)
@@ -139,7 +139,7 @@ github.com/containernetworking/cni/pkg/version
 # github.com/containernetworking/plugins v1.3.0
 ## explicit; go 1.20
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.32.0
+# github.com/containers/buildah v1.32.1-0.20231012130144-244170240d85
 ## explicit; go 1.18
 github.com/containers/buildah
 github.com/containers/buildah/bind
@@ -312,7 +312,7 @@ github.com/containers/libhvee/pkg/wmiext
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
-# github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f
+# github.com/containers/luksy v0.0.0-20230912175440-6df88cb7f0dd
 ## explicit; go 1.20
 github.com/containers/luksy
 # github.com/containers/ocicrypt v1.1.8