bump buildah to latest

Also includes a small change to make use of
https://github.com/containers/buildah/pull/5039

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Paul Holzinger
2023-09-12 15:30:07 +02:00
parent 18561f26ad
commit 2c2299ad85
155 changed files with 12220 additions and 14157 deletions

View File

@@ -32,7 +32,7 @@ env:
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20230614t132754z-f38f37d13"
IMAGE_SUFFIX: "c20230816t191118z-f38f37d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -112,7 +112,6 @@ smoke_task:
vendor_task:
name: "Test Vendoring"
alias: vendor
only_if: &not_multiarch $CIRRUS_CRON != 'multiarch'
env:
CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
@@ -137,8 +136,7 @@ cross_build_task:
name: "Cross Compile"
alias: cross_build
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
osx_instance:
image: ghcr.io/cirruslabs/macos-ventura-base:latest
@@ -160,8 +158,7 @@ unit_task:
alias: unit
only_if: &not_build_docs >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
$CIRRUS_CRON != 'multiarch'
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
depends_on: &smoke_vendor_cross
- smoke
- vendor
@@ -322,52 +319,6 @@ in_podman_task:
<<: *standardlogs
image_build_task: &image-build
name: "Build multi-arch $FLAVOR"
alias: image_build
# Some of these container images take > 1h to build, limit
# this task to a specific Cirrus-Cron entry with this name.
only_if: $CIRRUS_CRON == 'multiarch'
depends_on:
- smoke
timeout_in: 120m # emulation is sssllllooooowwww
gce_instance:
<<: *standardvm
image_name: build-push-${IMAGE_SUFFIX}
# More muscle required for parallel multi-arch build
type: "n2-standard-4"
matrix:
- env:
FLAVOR: upstream
- env:
FLAVOR: testing
- env:
FLAVOR: stable
env:
DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction
BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb]
BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5]
CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188]
CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6]
main_script:
- source /etc/automation_environment
- main.sh $CIRRUS_REPO_CLONE_URL contrib/buildahimage $FLAVOR
test_image_build_task:
<<: *image-build
alias: test_image_build
# Allow this to run inside a PR w/ [CI:BUILD] only.
only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
# This takes a LONG time, only run when requested. N/B: Any task
# made to depend on this one will block FOREVER unless triggered.
# DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
trigger_type: manual
# Overwrite all 'env', don't push anything, just do the build.
env:
DRYRUN: 1
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
@@ -384,7 +335,6 @@ success_task:
- cross_build
- integration
- in_podman
- image_build
container:
image: "quay.io/libpod/alpine:latest"

View File

@@ -2,29 +2,35 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/
# Build targets can be found at:
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/packit-builds/
# and
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
specfile_path: rpm/buildah.spec
upstream_tag_template: v{version}
jobs:
- &copr
job: copr_build
trigger: pull_request
owner: rhcontainerbot
project: packit-builds
enable_net: true
srpm_build_deps:
- make
srpm_build_deps:
- make
- <<: *copr
# Run on commit to main branch
jobs:
- job: copr_build
trigger: pull_request
# keep in sync with https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next
enable_net: true
targets:
- fedora-all-x86_64
- fedora-all-aarch64
- fedora-eln-x86_64
- fedora-eln-aarch64
- centos-stream+epel-next-8-x86_64
- centos-stream+epel-next-8-aarch64
- centos-stream+epel-next-9-x86_64
- centos-stream+epel-next-9-aarch64
additional_repos:
- "copr://rhcontainerbot/podman-next"
# Run on commit to main branch
- job: copr_build
trigger: commit
branch: main
owner: rhcontainerbot
project: podman-next
enable_net: true
- job: propose_downstream
trigger: release

View File

@@ -39,7 +39,7 @@ LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
LINTFLAGS ?=
@@ -69,14 +69,26 @@ static:
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
bin/buildah: $(SOURCES) cmd/buildah/*.go
bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
ifneq ($(shell as --version | grep x86_64),)
internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s
$(AS) -o $(patsubst %.s,%.o,$^) $^
$(LD) -o $@ $(patsubst %.s,%.o,$^)
strip $@
else
.PHONY: internal/mkcw/embed/entrypoint
endif
internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint
$(RM) $@
gzip -k $^
.PHONY: buildah
buildah: bin/buildah
# TODO: remove `grep -v loong64` from `ALL_CROSS_TARGETS` once go.etcd.io/bbolt 1.3.7 is out.
ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep -v loong64)))
ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list)))
LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))
DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS))
WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS)))

View File

@@ -386,6 +386,11 @@ type ImportFromImageOptions struct {
SystemContext *types.SystemContext
}
// ConfidentialWorkloadOptions encapsulates options which control whether or not
// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image
// instead of the usual rootfs contents.
type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions
// NewBuilder creates a new build container.
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
if options.CommonBuildOpts == nil {
@@ -433,6 +438,9 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
b.store = store
b.fixupConfig(nil)
b.setupLogger()
if b.CommonBuildOpts == nil {
b.CommonBuildOpts = &CommonBuildOptions{}
}
return b, nil
}
@@ -469,6 +477,9 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
b.store = store
b.fixupConfig(nil)
b.setupLogger()
if b.CommonBuildOpts == nil {
b.CommonBuildOpts = &CommonBuildOptions{}
}
return b, nil
}
if err != nil {
@@ -506,6 +517,9 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
b.store = store
b.setupLogger()
b.fixupConfig(nil)
if b.CommonBuildOpts == nil {
b.CommonBuildOpts = &CommonBuildOptions{}
}
builders = append(builders, b)
continue
}
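
A minimal sketch of the failure mode the new guards prevent: builders serialized by older buildah releases could deserialize with a nil CommonBuildOpts, so any later dereference panicked (the container name here is illustrative).

```go
b, err := buildah.OpenBuilder(store, "working-container") // name is illustrative
if err != nil {
	return err
}
_ = b.CommonBuildOpts.Ulimit // safe: the Open* helpers now guarantee a non-nil struct
```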

View File

@@ -79,9 +79,11 @@ func setSeccomp(spec *specs.Spec) error {
case specs.ArchS390X:
return libseccomp.ArchS390X
case specs.ArchPARISC:
/* fallthrough */ /* for now */
return libseccomp.ArchPARISC
case specs.ArchPARISC64:
/* fallthrough */ /* for now */
return libseccomp.ArchPARISC64
case specs.ArchRISCV64:
return libseccomp.ArchRISCV64
default:
logrus.Errorf("unmappable arch %v", specArch)
}

View File

@@ -105,6 +105,10 @@ type CommitOptions struct {
// integers in the slice represent 0-indexed layer indices, with support for negative
// indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
OciEncryptLayers *[]int
// ConfidentialWorkloadOptions is used to force the output image's rootfs to contain a
// LUKS-compatibly encrypted disk image (for use with krun) instead of the usual
// contents of a rootfs.
ConfidentialWorkloadOptions ConfidentialWorkloadOptions
// UnsetEnvs is a list of environments to not add to final image.
// Deprecated: use UnsetEnv() before committing instead.
UnsetEnvs []string
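
A hedged sketch of opting into the conversion at commit time; the TEE type and attestation server below are illustrative, not defaults.

```go
imageID, _, _, err := builder.Commit(ctx, imageRef, buildah.CommitOptions{
	ConfidentialWorkloadOptions: define.ConfidentialWorkloadOptions{
		Convert:        true,
		TeeType:        define.SNP,
		AttestationURL: "https://kbs.example.com", // illustrative server
	},
})
```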

vendor/github.com/containers/buildah/convertcw.go (new vendored file, 217 lines)
View File

@@ -0,0 +1,217 @@
package buildah
import (
"context"
"fmt"
"io"
"time"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal/mkcw"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// CWConvertImageOptions provides both required and optional bits of
// configuration for CWConvertImage().
type CWConvertImageOptions struct {
// Required parameters.
InputImage string
// If supplied, we'll tag the resulting image with the specified name.
Tag string
OutputImage types.ImageReference
// If supplied, we'll register the workload with this server.
// Practically necessary if DiskEncryptionPassphrase is not set, in
// which case we'll generate one and throw it away after.
AttestationURL string
// Used to measure the environment. If left unset (0), defaults will be applied.
CPUs int
Memory int
// Can be manually set. If left unset ("", false, nil), reasonable values will be used.
TeeType define.TeeType
IgnoreAttestationErrors bool
WorkloadID string
DiskEncryptionPassphrase string
Slop string
FirmwareLibrary string
BaseImage string
Logger *logrus.Logger
// Passed through to BuilderOptions. Most settings won't make
// sense to be made available here because we don't launch a process.
ContainerSuffix string
PullPolicy PullPolicy
BlobDirectory string
SignaturePolicyPath string
ReportWriter io.Writer
IDMappingOptions *IDMappingOptions
Format string
MaxPullRetries int
PullRetryDelay time.Duration
OciDecryptConfig *encconfig.DecryptConfig
MountLabel string
}
// CWConvertImage takes the rootfs and configuration from one image, generates a
// LUKS-encrypted disk image that more or less includes them both, and puts the
// result into a new container image.
// Returns the new image's ID and digest on success, along with a canonical
// reference for it if a repository name was specified.
func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options CWConvertImageOptions) (string, reference.Canonical, digest.Digest, error) {
// Apply our defaults if some options aren't set.
logger := options.Logger
if logger == nil {
logger = logrus.StandardLogger()
}
// Now create the target working container, pulling the base image if
// there is one and it isn't present.
builderOptions := BuilderOptions{
FromImage: options.BaseImage,
SystemContext: systemContext,
Logger: logger,
ContainerSuffix: options.ContainerSuffix,
PullPolicy: options.PullPolicy,
BlobDirectory: options.BlobDirectory,
SignaturePolicyPath: options.SignaturePolicyPath,
ReportWriter: options.ReportWriter,
IDMappingOptions: options.IDMappingOptions,
Format: options.Format,
MaxPullRetries: options.MaxPullRetries,
PullRetryDelay: options.PullRetryDelay,
OciDecryptConfig: options.OciDecryptConfig,
MountLabel: options.MountLabel,
}
target, err := NewBuilder(ctx, store, builderOptions)
if err != nil {
return "", nil, "", fmt.Errorf("creating container from target image: %w", err)
}
defer func() {
if err := target.Delete(); err != nil {
logrus.Warnf("deleting target container: %v", err)
}
}()
targetDir, err := target.Mount("")
if err != nil {
return "", nil, "", fmt.Errorf("mounting target container: %w", err)
}
defer func() {
if err := target.Unmount(); err != nil {
logrus.Warnf("unmounting target container: %v", err)
}
}()
// Mount the source image, pulling it first if necessary.
builderOptions = BuilderOptions{
FromImage: options.InputImage,
SystemContext: systemContext,
Logger: logger,
ContainerSuffix: options.ContainerSuffix,
PullPolicy: options.PullPolicy,
BlobDirectory: options.BlobDirectory,
SignaturePolicyPath: options.SignaturePolicyPath,
ReportWriter: options.ReportWriter,
IDMappingOptions: options.IDMappingOptions,
Format: options.Format,
MaxPullRetries: options.MaxPullRetries,
PullRetryDelay: options.PullRetryDelay,
OciDecryptConfig: options.OciDecryptConfig,
MountLabel: options.MountLabel,
}
source, err := NewBuilder(ctx, store, builderOptions)
if err != nil {
return "", nil, "", fmt.Errorf("creating container from source image: %w", err)
}
defer func() {
if err := source.Delete(); err != nil {
logrus.Warnf("deleting source container: %v", err)
}
}()
sourceInfo := GetBuildInfo(source)
if err != nil {
return "", nil, "", fmt.Errorf("retrieving info about source image: %w", err)
}
sourceImageID := sourceInfo.FromImageID
sourceSize, err := store.ImageSize(sourceImageID)
if err != nil {
return "", nil, "", fmt.Errorf("computing size of source image: %w", err)
}
sourceDir, err := source.Mount("")
if err != nil {
return "", nil, "", fmt.Errorf("mounting source container: %w", err)
}
defer func() {
if err := source.Unmount(); err != nil {
logrus.Warnf("unmounting source container: %v", err)
}
}()
// Generate the image contents.
archiveOptions := mkcw.ArchiveOptions{
AttestationURL: options.AttestationURL,
CPUs: options.CPUs,
Memory: options.Memory,
TempDir: targetDir,
TeeType: options.TeeType,
IgnoreAttestationErrors: options.IgnoreAttestationErrors,
ImageSize: sourceSize,
WorkloadID: options.WorkloadID,
DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
Slop: options.Slop,
FirmwareLibrary: options.FirmwareLibrary,
Logger: logger,
}
rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions)
if err != nil {
return "", nil, "", fmt.Errorf("generating encrypted image content: %w", err)
}
if err = archive.Untar(rc, targetDir, &archive.TarOptions{}); err != nil {
if err = rc.Close(); err != nil {
logger.Warnf("cleaning up: %v", err)
}
return "", nil, "", fmt.Errorf("saving encrypted image content: %w", err)
}
if err = rc.Close(); err != nil {
return "", nil, "", fmt.Errorf("cleaning up: %w", err)
}
// Commit the image. Clear out most of the configuration (if there is any — we default
// to scratch as a base) so that an engine that doesn't or can't set up a TEE will just
// run the static entrypoint. The rest of the configuration which the runtime consults
// is in the .krun_config.json file in the encrypted filesystem.
logger.Log(logrus.DebugLevel, "committing disk image")
target.ClearAnnotations()
target.ClearEnv()
target.ClearLabels()
target.ClearOnBuild()
target.ClearPorts()
target.ClearVolumes()
target.SetCmd(nil)
target.SetCreatedBy(fmt.Sprintf(": convert %q for use with %q", sourceImageID, workloadConfig.Type))
target.SetDomainname("")
target.SetEntrypoint([]string{"/entrypoint"})
target.SetHealthcheck(nil)
target.SetHostname("")
target.SetMaintainer("")
target.SetShell(nil)
target.SetUser("")
target.SetWorkDir("")
commitOptions := CommitOptions{
SystemContext: systemContext,
}
if options.Tag != "" {
commitOptions.AdditionalTags = append(commitOptions.AdditionalTags, options.Tag)
}
return target.Commit(ctx, options.OutputImage, commitOptions)
}
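
A hedged usage sketch for the new entry point, assuming a storage.Store and *types.SystemContext are already in hand; the image names and attestation URL are illustrative.

```go
imageID, canonicalRef, imageDigest, err := buildah.CWConvertImage(ctx, systemContext, store,
	buildah.CWConvertImageOptions{
		InputImage:     "quay.io/example/app:latest", // illustrative
		Tag:            "quay.io/example/app:tee",    // illustrative
		TeeType:        define.SNP,
		AttestationURL: "https://kbs.example.com", // illustrative
	})
if err != nil {
	return err
}
logrus.Infof("new image %s (digest %s, ref %v)", imageID, imageDigest, canonicalRef)
```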

View File

@@ -19,7 +19,6 @@ import (
"syscall"
"time"
"github.com/containers/buildah/util"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils"
@@ -1141,7 +1140,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
cb := func() error {
tw := tar.NewWriter(bulkWriter)
defer tw.Close()
hardlinkChecker := new(util.HardlinkChecker)
hardlinkChecker := new(hardlinkChecker)
itemsCopied := 0
for i, item := range queue {
// if we're not discarding the names of individual directories, keep track of this one
@@ -1353,7 +1352,7 @@ func handleRename(rename map[string]string, name string) string {
return name
}
func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error {
func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *hardlinkChecker, idMappings *idtools.IDMappings) error {
// build the header using the name provided
hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
if err != nil {

View File

@@ -1,6 +1,7 @@
//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le)
// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le
package util
package copier
import (
"syscall"

View File

@@ -1,7 +1,7 @@
//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd
// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd
package util
package copier
import (
"syscall"

View File

@@ -0,0 +1,32 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
package copier
import (
"os"
"sync"
"syscall"
)
type hardlinkDeviceAndInode struct {
device, inode uint64
}
type hardlinkChecker struct {
hardlinks sync.Map
}
func (h *hardlinkChecker) Check(fi os.FileInfo) string {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" {
return name.(string)
}
}
return ""
}
func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
}
}
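
For context, a sketch of how copier's archive writer uses this type: the first path seen for a (device, inode) pair is recorded, and later paths with the same pair are emitted as hard links instead of duplicate content (hdr and name come from the surrounding copy loop).

```go
if linkName := hardlinkChecker.Check(srcfi); linkName != "" {
	hdr.Typeflag = tar.TypeLink // later occurrence: emit a link entry
	hdr.Linkname = linkName
	hdr.Size = 0
} else {
	hardlinkChecker.Add(srcfi, name) // first occurrence: remember its name
}
```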

View File

@@ -0,0 +1,17 @@
//go:build !linux && !darwin
// +build !linux,!darwin
package copier
import (
"os"
)
type hardlinkChecker struct {
}
func (h *hardlinkChecker) Check(fi os.FileInfo) string {
return ""
}
func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
}

View File

@@ -163,6 +163,10 @@ type BuildOptions struct {
// It allows end user to export recently built rootfs into a directory or tar.
// See the documentation of 'buildah build --output' for the details of the format.
BuildOutput string
// ConfidentialWorkload controls whether or not, and if so, how, we produce an
// image that's meant to be run using krun as a VM instead of a conventional
// process-type container.
ConfidentialWorkload ConfidentialWorkloadOptions
// Additional tags to add to the image that we write, if we know of a
// way to add them.
AdditionalTags []string
@@ -244,6 +248,8 @@ type BuildOptions struct {
Squash bool
// Labels metadata for an image
Labels []string
// LayerLabels metadata for an intermediate image
LayerLabels []string
// Annotation metadata for an image
Annotations []string
// OnBuild commands to be run by images based on this image
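
A hedged sketch combining the two new knobs (values illustrative): LayerLabels stamps only intermediate layer commits, while ConfidentialWorkload asks the final commit to emit a LUKS-encrypted disk image.

```go
options := define.BuildOptions{
	Labels:      []string{"maintainer=dev@example.com"}, // final image only
	LayerLabels: []string{"build.cache=true"},           // intermediate images only
	ConfidentialWorkload: define.ConfidentialWorkloadOptions{
		Convert: true,
		TeeType: define.SNP,
	},
}
```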

View File

@@ -47,8 +47,16 @@ const (
OCI = "oci"
// DOCKER used to define the "docker" image format
DOCKER = "docker"
// SEV is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization using encrypted state, requires epyc 1000 "naples")
SEV TeeType = "sev"
// SNP is a known trusted execution environment type: AMD-SNP (SEV secure nested pages) (requires epyc 3000 "milan")
SNP TeeType = "snp"
)
// TeeType is a supported trusted execution environment type.
type TeeType string
var (
// DefaultCapabilities is the list of capabilities which we grant by
// default to containers which are running under UID 0.
@@ -105,6 +113,23 @@ type BuildOutputOption struct {
IsStdout bool
}
// ConfidentialWorkloadOptions encapsulates options which control whether or not
// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image
// instead of the usual rootfs contents.
type ConfidentialWorkloadOptions struct {
Convert bool
AttestationURL string
CPUs int
Memory int
TempDir string
TeeType TeeType
IgnoreAttestationErrors bool
WorkloadID string
DiskEncryptionPassphrase string
Slop string
FirmwareLibrary string
}
// TempDirForURL checks if the passed-in string looks like a URL or -. If it is,
// TempDirForURL creates a temporary directory, arranges for its contents to be
// the contents of that URL, and returns the temporary directory's path, along

View File

@@ -16,6 +16,7 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
"github.com/containers/buildah/internal/mkcw"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
@@ -69,6 +70,7 @@ type containerImageRef struct {
annotations map[string]string
preferredManifestType string
squash bool
confidentialWorkload ConfidentialWorkloadOptions
omitHistory bool
emptyLayer bool
idMappingOptions *define.IDMappingOptions
@@ -158,6 +160,52 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om
return omediaType, dmediaType, nil
}
// Extract the container's whole filesystem as a filesystem image, wrapped
// in LUKS-compatible encryption.
func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
var image v1.Image
if err := json.Unmarshal(i.oconfig, &image); err != nil {
return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
}
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
}
archiveOptions := mkcw.ArchiveOptions{
AttestationURL: options.AttestationURL,
CPUs: options.CPUs,
Memory: options.Memory,
TempDir: options.TempDir,
TeeType: options.TeeType,
IgnoreAttestationErrors: options.IgnoreAttestationErrors,
WorkloadID: options.WorkloadID,
DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
Slop: options.Slop,
FirmwareLibrary: options.FirmwareLibrary,
}
rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
if err != nil {
if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
}
return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err)
}
return ioutils.NewReadCloserWrapper(rc, func() error {
if err = rc.Close(); err != nil {
err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
}
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
if err2 != nil {
err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
}
err = err2
} else {
logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
}
return err
}), nil
}
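
The unmount-on-close handling above is a small wrapper pattern worth calling out; a minimal sketch with illustrative names:

```go
// Tie cleanup to the consumer: the container stays mounted exactly as long
// as the returned tar stream is being read, and unmounts on Close().
rc := ioutils.NewReadCloserWrapper(tarStream, func() error {
	if err := tarStream.Close(); err != nil {
		return err
	}
	_, err := store.Unmount(containerID, false)
	return err
})
```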
// Extract the container's whole filesystem as if it were a single layer.
// Takes ExtractRootfsOptions as argument which allows caller to configure
// preserve nature of setuid,setgid,sticky and extended attributes
@@ -221,7 +269,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
oimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
if i.squash || i.omitHistory {
if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
oimage.History = []v1.History{}
}
@@ -237,6 +285,24 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
}
// Always replace this value, since we're newer than our base image.
dimage.Created = created
// If we're producing a confidential workload, override the command and
// assorted other settings that aren't expected to work correctly.
if i.confidentialWorkload.Convert {
dimage.Config.Entrypoint = []string{"/entrypoint"}
oimage.Config.Entrypoint = []string{"/entrypoint"}
dimage.Config.Cmd = nil
oimage.Config.Cmd = nil
dimage.Config.User = ""
oimage.Config.User = ""
dimage.Config.WorkingDir = ""
oimage.Config.WorkingDir = ""
dimage.Config.Healthcheck = nil
dimage.Config.Shell = nil
dimage.Config.Volumes = nil
oimage.Config.Volumes = nil
dimage.Config.ExposedPorts = nil
oimage.Config.ExposedPorts = nil
}
// Clear the list of diffIDs, since we always repopulate it.
dimage.RootFS = &docker.V2S2RootFS{}
dimage.RootFS.Type = docker.TypeLayers
@@ -244,7 +310,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
// Only clear the history if we're squashing, otherwise leave it be so
// that we can append entries to it. Clear the parent, too, we no
// longer include its layers and history.
if i.squash || i.omitHistory {
if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
dimage.Parent = ""
dimage.History = []docker.V2S2History{}
}
@@ -296,7 +362,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
for layer != nil {
layers = append(append([]string{}, layerID), layers...)
layerID = layer.Parent
if layerID == "" || i.squash {
if layerID == "" || i.confidentialWorkload.Convert || i.squash {
err = nil
break
}
@@ -333,7 +399,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
blobLayers := make(map[digest.Digest]blobLayerInfo)
for _, layerID := range layers {
what := fmt.Sprintf("layer %q", layerID)
if i.squash {
if i.confidentialWorkload.Convert || i.squash {
what = fmt.Sprintf("container %q", i.containerID)
}
// The default layer media type assumes no compression.
@@ -351,7 +417,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
// If we already know the digest of the contents of parent
// layers, reuse their blobsums, diff IDs, and sizes.
if !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" {
if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" {
layerBlobSum := layer.UncompressedDigest
layerBlobSize := layer.UncompressedSize
diffID := layer.UncompressedDigest
@@ -389,7 +455,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
var rc io.ReadCloser
var errChan chan error
if i.squash {
if i.confidentialWorkload.Convert {
// Convert the root filesystem into an encrypted disk image.
rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
if err != nil {
return nil, err
}
} else if i.squash {
// Extract the root filesystem as a single layer.
rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{})
if err != nil {
@@ -842,6 +914,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
annotations: b.Annotations(),
preferredManifestType: manifestType,
squash: options.Squash,
confidentialWorkload: options.ConfidentialWorkloadOptions,
omitHistory: options.OmitHistory,
emptyLayer: options.EmptyLayer && !options.Squash,
idMappingOptions: &b.IDMappingOptions,

View File

@@ -100,6 +100,7 @@ type Executor struct {
iidfile string
squash bool
labels []string
layerLabels []string
annotations []string
layers bool
noHosts bool
@@ -115,6 +116,7 @@ type Executor struct {
groupAdd []string
ignoreFile string
args map[string]string
globalArgs map[string]string
unusedArgs map[string]struct{}
capabilities []string
devices define.ContainerDevices
@@ -146,6 +148,7 @@ type Executor struct {
osVersion string
osFeatures []string
envs []string
confidentialWorkload define.ConfidentialWorkloadOptions
}
type imageTypeAndHistoryAndDiffIDs struct {
@@ -263,6 +266,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
iidfile: options.IIDFile,
squash: options.Squash,
labels: append([]string{}, options.Labels...),
layerLabels: append([]string{}, options.LayerLabels...),
annotations: append([]string{}, options.Annotations...),
layers: options.Layers,
noHosts: options.CommonBuildOpts.NoHosts,
@@ -300,6 +304,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
osVersion: options.OSVersion,
osFeatures: append([]string{}, options.OSFeatures...),
envs: append([]string{}, options.Envs...),
confidentialWorkload: options.ConfidentialWorkload,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -313,6 +318,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
exec.unusedArgs[arg] = struct{}{}
}
}
// Use this flag to collect all args declared before
// the first stage and treat them as global args, which are
// accessible to all stages.
foundFirstStage := false
globalArgs := make(map[string]string)
for _, line := range mainNode.Children {
node := line
for node != nil { // tokens on this line, though we only care about the first
@@ -324,12 +334,20 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
// and value, or just an argument, since they can be
// separated by either "=" or whitespace.
list := strings.SplitN(arg.Value, "=", 2)
if !foundFirstStage {
if len(list) > 1 {
globalArgs[list[0]] = list[1]
}
}
delete(exec.unusedArgs, list[0])
}
case "FROM":
foundFirstStage = true
}
break
}
}
exec.globalArgs = globalArgs
return &exec, nil
}
@@ -360,15 +378,11 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
if imageRef, err := alltransports.ParseImageName(output); err == nil {
return imageRef, nil
}
runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext})
resolved, err := libimage.NormalizeName(output)
if err != nil {
return nil, err
}
resolved, err := runtime.ResolveName(output)
if err != nil {
return nil, err
}
imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved)
imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved.String())
if err == nil {
return imageRef, nil
}
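
A hedged sketch of the new resolution path: the name is normalized purely syntactically by containers/common's libimage, so no libimage.Runtime has to be constructed first.

```go
named, err := libimage.NormalizeName("myimage") // -> localhost/myimage:latest
if err != nil {
	return nil, err
}
imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, named.String())
```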
@@ -623,6 +637,9 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa
if _, isBuiltIn := builtinAllowedBuildArgs[argName]; isBuiltIn {
shouldWarn = false
}
if _, isGlobalArg := b.globalArgs[argName]; isGlobalArg {
shouldWarn = false
}
if shouldWarn {
b.logger.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
}
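
The effect, sketched with a Containerfile held in a Go string (contents illustrative): an ARG declared before the first FROM is now tracked as a global arg, so stages that consume it no longer trigger the missing-build-arg warning.

```go
containerfile := `
ARG BASE=registry.fedoraproject.org/fedora:latest
FROM ${BASE} AS build
ARG BASE
RUN echo "built from ${BASE}"
`
_ = containerfile // in practice this reaches the executor as a parsed Dockerfile
```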

View File

@@ -18,6 +18,7 @@ import (
"github.com/containers/buildah/define"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/internal/tmpdir"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/rusage"
@@ -401,7 +402,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
// additional context contains a tar file
// so download and explode tar to buildah
// temp and point context to that.
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
@@ -537,7 +538,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
// additional context contains a tar file
// so download and explode tar to buildah
// temp and point context to that.
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
@@ -1032,7 +1033,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash, lastStage); err != nil {
return "", nil, fmt.Errorf("committing base container: %w", err)
}
// Generate build output if needed.
@@ -1045,7 +1046,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// The image would be modified by the labels passed
// via the command line, so we need to commit.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil {
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash, lastStage); err != nil {
return "", nil, err
}
// Generate build output if needed.
@@ -1193,7 +1194,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction)
if err != nil {
return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
@@ -1420,7 +1421,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// because at this point we want to save history for
// layers even if its a squashed build so that they
// can be part of build-cache.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false, lastStage && lastInstruction)
if err != nil {
return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
@@ -1454,7 +1455,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Create a squashed version of this image
// if we're supposed to create one and this
// is the last instruction of the last stage.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction)
if err != nil {
return "", nil, fmt.Errorf("committing final squash step %+v: %w", *step, err)
}
@@ -1941,7 +1942,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
// or commit via any custom exporter if specified.
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) {
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash, finalInstruction bool) (string, reference.Canonical, error) {
ib := s.stage.Builder
var imageRef types.ImageReference
if output != "" {
@@ -2016,6 +2017,19 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
s.builder.ClearLabels()
if output == "" {
// If output is not set, we are committing
// an intermediate image; in that case we must
// honor layer labels if they are configured.
for _, labelString := range s.executor.layerLabels {
label := strings.SplitN(labelString, "=", 2)
if len(label) > 1 {
s.builder.SetLabel(label[0], label[1])
} else {
s.builder.SetLabel(label[0], "")
}
}
}
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
@@ -2056,6 +2070,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
HistoryTimestamp: s.executor.timestamp,
Manifest: s.executor.manifest,
}
if finalInstruction {
options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
return "", nil, err

View File

@@ -107,6 +107,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
GIDMap: gidmap,
},
NetworkInterface: netInt,
CommonBuildOpts: &CommonBuildOptions{},
}
if err := builder.initConfig(ctx, image, systemContext); err != nil {

View File

@@ -23,11 +23,11 @@ sudo yum -y install buildah
#### [Debian](https://debian.org)
The buildah package is available in
the [Bullseye](https://packages.debian.org/bullseye/buildah), which
is the current stable release (Debian 11), as well as Debian Unstable/Sid.
the [Bookworm](https://packages.debian.org/bookworm/buildah), which
is the current stable release (Debian 12), as well as Debian Unstable/Sid.
```bash
# Debian Stable/Bullseye or Unstable/Sid
# Debian Stable/Bookworm or Unstable/Sid
sudo apt-get update
sudo apt-get -y install buildah
```

View File

@@ -0,0 +1,464 @@
package mkcw
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/containers/luksy"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
const minimumImageSize = 10 * 1024 * 1024
// ArchiveOptions includes optional settings for generating an archive.
type ArchiveOptions struct {
// If supplied, we'll register the workload with this server.
// Practically necessary if DiskEncryptionPassphrase is not set, in
// which case we'll generate one and throw it away after.
AttestationURL string
// Used to measure the environment. If left unset (0, ""), defaults will be applied.
CPUs int
Memory int
// Can be manually set. If left unset ("", false, nil), reasonable values will be used.
TempDir string
TeeType TeeType
IgnoreAttestationErrors bool
ImageSize int64
WorkloadID string
Slop string
DiskEncryptionPassphrase string
FirmwareLibrary string
Logger *logrus.Logger
}
type chainRetrievalError struct {
stderr string
err error
}
func (c chainRetrievalError) Error() string {
if trimmed := strings.TrimSpace(c.stderr); trimmed != "" {
return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v: %v", strings.TrimSpace(c.stderr), c.err)
}
return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v", c.err)
}
// Archive generates a WorkloadConfig for a specified directory and produces a
// tar archive of a container image's rootfs with the expected contents.
// The input directory will have a ".krun_config.json" file added to it while
// this function is running, but it will be removed on completion.
func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) {
const (
teeDefaultCPUs = 2
teeDefaultMemory = 512
teeDefaultFilesystem = "ext4"
teeDefaultTeeType = SNP
)
if path == "" {
return nil, WorkloadConfig{}, fmt.Errorf("required path not specified")
}
logger := options.Logger
if logger == nil {
logger = logrus.StandardLogger()
}
teeType := options.TeeType
if teeType == "" {
teeType = teeDefaultTeeType
}
cpus := options.CPUs
if cpus == 0 {
cpus = teeDefaultCPUs
}
memory := options.Memory
if memory == 0 {
memory = teeDefaultMemory
}
filesystem := teeDefaultFilesystem
workloadID := options.WorkloadID
if workloadID == "" {
digestInput := path + filesystem + time.Now().String()
workloadID = digest.Canonical.FromString(digestInput).Encoded()
}
workloadConfig := WorkloadConfig{
Type: teeType,
WorkloadID: workloadID,
CPUs: cpus,
Memory: memory,
AttestationURL: options.AttestationURL,
}
// Do things which are specific to the type of TEE we're building for.
var chainBytes []byte
var chainBytesFile string
var chainInfo fs.FileInfo
switch teeType {
default:
return nil, WorkloadConfig{}, fmt.Errorf("don't know how to generate TeeData for TEE type %q", teeType)
case SEV, SEV_NO_ES:
// If we need a certificate chain, get it.
chain, err := os.CreateTemp(options.TempDir, "chain")
if err != nil {
return nil, WorkloadConfig{}, err
}
chain.Close()
defer func() {
if err := os.Remove(chain.Name()); err != nil {
logger.Warnf("error removing temporary file %q: %v", chain.Name(), err)
}
}()
logrus.Debugf("sevctl export -f %s", chain.Name())
cmd := exec.Command("sevctl", "export", "-f", chain.Name())
var stdout, stderr bytes.Buffer
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err := cmd.Run(); err != nil {
if !options.IgnoreAttestationErrors {
return nil, WorkloadConfig{}, chainRetrievalError{stderr.String(), err}
}
logger.Warn(chainRetrievalError{stderr.String(), err}.Error())
}
if chainBytes, err = os.ReadFile(chain.Name()); err != nil {
chainBytes = []byte{}
}
var teeData SevWorkloadData
if len(chainBytes) > 0 {
chainBytesFile = "sev.chain"
chainInfo, err = os.Stat(chain.Name())
if err != nil {
return nil, WorkloadConfig{}, err
}
teeData.VendorChain = "/" + chainBytesFile
}
encodedTeeData, err := json.Marshal(teeData)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
}
workloadConfig.TeeData = string(encodedTeeData)
case SNP:
teeData := SnpWorkloadData{
Generation: "milan",
}
encodedTeeData, err := json.Marshal(teeData)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
}
workloadConfig.TeeData = string(encodedTeeData)
}
// Write part of the config blob where the krun init process will be
// looking for it. The oci2cw tool used `buildah inspect` output, but
// init is just looking for fields that have the right names in any
// object, and the image's config will have that, so let's try encoding
// it directly.
krunConfigPath := filepath.Join(path, ".krun_config.json")
krunConfigBytes, err := json.Marshal(ociConfig)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err)
}
if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err)
}
defer func() {
if err := os.Remove(krunConfigPath); err != nil {
logger.Warnf("removing krun configuration file: %v", err)
}
}()
// Encode the workload config, in case it fails for any reason.
cleanedUpWorkloadConfig := workloadConfig
switch cleanedUpWorkloadConfig.Type {
default:
return nil, WorkloadConfig{}, fmt.Errorf("don't know how to canonicalize TEE type %q", cleanedUpWorkloadConfig.Type)
case SEV, SEV_NO_ES:
cleanedUpWorkloadConfig.Type = SEV
case SNP:
cleanedUpWorkloadConfig.Type = SNP
}
workloadConfigBytes, err := json.Marshal(cleanedUpWorkloadConfig)
if err != nil {
return nil, WorkloadConfig{}, err
}
// Make sure we have the passphrase to use for encrypting the disk image.
diskEncryptionPassphrase := options.DiskEncryptionPassphrase
if diskEncryptionPassphrase == "" {
diskEncryptionPassphrase, err = GenerateDiskEncryptionPassphrase()
if err != nil {
return nil, WorkloadConfig{}, err
}
}
// If we weren't told how big the image should be, get a rough estimate
// of the input data size, then add a hedge to it.
imageSize := slop(options.ImageSize, options.Slop)
if imageSize == 0 {
var sourceSize int64
if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
return err
}
info, err := d.Info()
if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
return err
}
sourceSize += info.Size()
return nil
}); err != nil {
return nil, WorkloadConfig{}, err
}
imageSize = slop(sourceSize, options.Slop)
}
if imageSize%4096 != 0 {
imageSize += (4096 - (imageSize % 4096))
}
if imageSize < minimumImageSize {
imageSize = minimumImageSize
}
// Create a file to use as the unencrypted version of the disk image.
plain, err := os.CreateTemp(options.TempDir, "plain.img")
if err != nil {
return nil, WorkloadConfig{}, err
}
removePlain := true
defer func() {
if removePlain {
if err := os.Remove(plain.Name()); err != nil {
logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
}
}
}()
// Lengthen the plaintext disk image file.
if err := plain.Truncate(imageSize); err != nil {
plain.Close()
return nil, WorkloadConfig{}, err
}
plainInfo, err := plain.Stat()
plain.Close()
if err != nil {
return nil, WorkloadConfig{}, err
}
// Format the disk image with the filesystem contents.
if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil {
if strings.TrimSpace(stderr) != "" {
return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err)
}
return nil, WorkloadConfig{}, err
}
// If we're registering the workload, we can do that now.
if workloadConfig.AttestationURL != "" {
if err := SendRegistrationRequest(workloadConfig, diskEncryptionPassphrase, options.FirmwareLibrary, options.IgnoreAttestationErrors, logger); err != nil {
return nil, WorkloadConfig{}, err
}
}
// Try to encrypt on the fly.
pipeReader, pipeWriter := io.Pipe()
removePlain = false
go func() {
var err error
defer func() {
if err := os.Remove(plain.Name()); err != nil {
logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
}
if err != nil {
pipeWriter.CloseWithError(err)
} else {
pipeWriter.Close()
}
}()
plain, err := os.Open(plain.Name())
if err != nil {
logrus.Errorf("opening unencrypted disk image %q: %v", plain.Name(), err)
return
}
defer plain.Close()
tw := tar.NewWriter(pipeWriter)
defer tw.Flush()
// Write /entrypoint
var decompressedEntrypoint bytes.Buffer
decompressor, err := gzip.NewReader(bytes.NewReader(entrypointCompressedBytes))
if err != nil {
logrus.Errorf("decompressing copy of entrypoint: %v", err)
return
}
defer decompressor.Close()
if _, err = io.Copy(&decompressedEntrypoint, decompressor); err != nil {
logrus.Errorf("decompressing copy of entrypoint: %v", err)
return
}
entrypointHeader, err := tar.FileInfoHeader(plainInfo, "")
if err != nil {
logrus.Errorf("building header for entrypoint: %v", err)
return
}
entrypointHeader.Name = "entrypoint"
entrypointHeader.Mode = 0o755
entrypointHeader.Uname, entrypointHeader.Gname = "", ""
entrypointHeader.Uid, entrypointHeader.Gid = 0, 0
entrypointHeader.Size = int64(decompressedEntrypoint.Len())
if err = tw.WriteHeader(entrypointHeader); err != nil {
logrus.Errorf("writing header for %q: %v", entrypointHeader.Name, err)
return
}
if _, err = io.Copy(tw, &decompressedEntrypoint); err != nil {
logrus.Errorf("writing %q: %v", entrypointHeader.Name, err)
return
}
// Write /sev.chain
if chainInfo != nil {
chainHeader, err := tar.FileInfoHeader(chainInfo, "")
if err != nil {
logrus.Errorf("building header for %q: %v", chainInfo.Name(), err)
return
}
chainHeader.Name = chainBytesFile
chainHeader.Mode = 0o600
chainHeader.Uname, chainHeader.Gname = "", ""
chainHeader.Uid, chainHeader.Gid = 0, 0
chainHeader.Size = int64(len(chainBytes))
if err = tw.WriteHeader(chainHeader); err != nil {
logrus.Errorf("writing header for %q: %v", chainHeader.Name, err)
return
}
if _, err = tw.Write(chainBytes); err != nil {
logrus.Errorf("writing %q: %v", chainHeader.Name, err)
return
}
}
// Write /krun-sev.json.
workloadConfigHeader, err := tar.FileInfoHeader(plainInfo, "")
if err != nil {
logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
return
}
workloadConfigHeader.Name = "krun-sev.json"
workloadConfigHeader.Mode = 0o600
workloadConfigHeader.Uname, workloadConfigHeader.Gname = "", ""
workloadConfigHeader.Uid, workloadConfigHeader.Gid = 0, 0
workloadConfigHeader.Size = int64(len(workloadConfigBytes))
if err = tw.WriteHeader(workloadConfigHeader); err != nil {
logrus.Errorf("writing header for %q: %v", workloadConfigHeader.Name, err)
return
}
if _, err = tw.Write(workloadConfigBytes); err != nil {
logrus.Errorf("writing %q: %v", workloadConfigHeader.Name, err)
return
}
// Write /tmp.
tmpHeader, err := tar.FileInfoHeader(plainInfo, "")
if err != nil {
logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
return
}
tmpHeader.Name = "tmp/"
tmpHeader.Typeflag = tar.TypeDir
tmpHeader.Mode = 0o1777
tmpHeader.Uname, workloadConfigHeader.Gname = "", ""
tmpHeader.Uid, workloadConfigHeader.Gid = 0, 0
tmpHeader.Size = 0
if err = tw.WriteHeader(tmpHeader); err != nil {
logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)
return
}
// Now figure out the footer that we'll append to the encrypted disk.
var footer bytes.Buffer
lengthBuffer := make([]byte, 8)
footer.Write(workloadConfigBytes)
footer.WriteString("KRUN")
binary.LittleEndian.PutUint64(lengthBuffer, uint64(len(workloadConfigBytes)))
footer.Write(lengthBuffer)
// Start encrypting and write /disk.img.
header, encrypt, blockSize, err := luksy.EncryptV1([]string{diskEncryptionPassphrase}, "")
paddingBoundary := int64(4096)
paddingNeeded := (paddingBoundary - ((int64(len(header)) + imageSize + int64(footer.Len())) % paddingBoundary)) % paddingBoundary
diskHeader := workloadConfigHeader
diskHeader.Name = "disk.img"
diskHeader.Mode = 0o600
diskHeader.Size = int64(len(header)) + imageSize + paddingNeeded + int64(footer.Len())
if err = tw.WriteHeader(diskHeader); err != nil {
logrus.Errorf("writing archive header for disk.img: %v", err)
return
}
if _, err = io.Copy(tw, bytes.NewReader(header)); err != nil {
logrus.Errorf("writing encryption header for disk.img: %v", err)
return
}
encryptWrapper := luksy.EncryptWriter(encrypt, tw, blockSize)
if _, err = io.Copy(encryptWrapper, plain); err != nil {
logrus.Errorf("encrypting disk.img: %v", err)
return
}
encryptWrapper.Close()
if _, err = tw.Write(make([]byte, paddingNeeded)); err != nil {
logrus.Errorf("writing padding for disk.img: %v", err)
return
}
if _, err = io.Copy(tw, &footer); err != nil {
logrus.Errorf("writing footer for disk.img: %v", err)
return
}
tw.Close()
}()
return pipeReader, workloadConfig, nil
}
func slop(size int64, slop string) int64 {
if slop == "" {
return size * 5 / 4
}
for _, factor := range strings.Split(slop, "+") {
factor = strings.TrimSpace(factor)
if factor == "" {
continue
}
if strings.HasSuffix(factor, "%") {
percentage := strings.TrimSuffix(factor, "%")
percent, err := strconv.ParseInt(percentage, 10, 8)
if err != nil {
logrus.Warnf("parsing percentage %q: %v", factor, err)
} else {
size *= (percent + 100)
size /= 100
}
} else {
more, err := units.RAMInBytes(factor)
if err != nil {
logrus.Warnf("parsing %q as a size: %v", factor, err)
} else {
size += more
}
}
}
return size
}
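
An illustration of the slop grammar as implemented above: factors are "+"-separated, a trailing "%" scales the running size, and anything else is parsed by units.RAMInBytes (binary multiples) and added.

```go
fmt.Println(slop(10*1024*1024, ""))          // 13107200  (default is +25%)
fmt.Println(slop(10*1024*1024, "25%"))       // 13107200
fmt.Println(slop(10*1024*1024, "100MB"))     // 115343360 (10 MiB + 100 MiB)
fmt.Println(slop(10*1024*1024, "25%+100MB")) // 117964800
```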

View File

@@ -0,0 +1,250 @@
package mkcw
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/containers/buildah/internal/mkcw/types"
"github.com/sirupsen/logrus"
)
type (
RegistrationRequest = types.RegistrationRequest
TeeConfig = types.TeeConfig
TeeConfigFlags = types.TeeConfigFlags
TeeConfigMinFW = types.TeeConfigMinFW
)
type measurementError struct {
err error
}
func (m measurementError) Error() string {
return fmt.Sprintf("generating measurement for attestation: %v", m.err)
}
type attestationError struct {
err error
}
func (a attestationError) Error() string {
return fmt.Sprintf("registering workload: %v", a.err)
}
type httpError struct {
statusCode int
}
func (h httpError) Error() string {
if statusText := http.StatusText(h.statusCode); statusText != "" {
return fmt.Sprintf("received server status %d (%q)", h.statusCode, statusText)
}
return fmt.Sprintf("received server status %d", h.statusCode)
}
// SendRegistrationRequest registers a workload with the specified decryption
// passphrase with the service whose location is part of the WorkloadConfig.
func SendRegistrationRequest(workloadConfig WorkloadConfig, diskEncryptionPassphrase, firmwareLibrary string, ignoreAttestationErrors bool, logger *logrus.Logger) error {
if workloadConfig.AttestationURL == "" {
return errors.New("attestation URL not provided")
}
// Measure the execution environment.
measurement, err := GenerateMeasurement(workloadConfig, firmwareLibrary)
if err != nil {
if !ignoreAttestationErrors {
return &measurementError{err}
}
logger.Warnf("generating measurement for attestation: %v", err)
}
// Build the workload registration (attestation) request body.
var teeConfigBytes []byte
switch workloadConfig.Type {
case SEV, SEV_NO_ES, SNP:
var cbits types.TeeConfigFlagBits
switch workloadConfig.Type {
case SEV:
cbits = types.SEV_CONFIG_NO_DEBUG |
types.SEV_CONFIG_NO_KEY_SHARING |
types.SEV_CONFIG_ENCRYPTED_STATE |
types.SEV_CONFIG_NO_SEND |
types.SEV_CONFIG_DOMAIN |
types.SEV_CONFIG_SEV
case SEV_NO_ES:
cbits = types.SEV_CONFIG_NO_DEBUG |
types.SEV_CONFIG_NO_KEY_SHARING |
types.SEV_CONFIG_NO_SEND |
types.SEV_CONFIG_DOMAIN |
types.SEV_CONFIG_SEV
case SNP:
cbits = types.SNP_CONFIG_SMT |
types.SNP_CONFIG_MANDATORY |
types.SNP_CONFIG_MIGRATE_MA |
types.SNP_CONFIG_DEBUG
default:
panic("internal error") // shouldn't happen
}
teeConfig := TeeConfig{
Flags: TeeConfigFlags{
Bits: cbits,
},
MinFW: TeeConfigMinFW{
Major: 0,
Minor: 0,
},
}
teeConfigBytes, err = json.Marshal(teeConfig)
if err != nil {
return err
}
default:
return fmt.Errorf("don't know how to generate tee_config for %q TEEs", workloadConfig.Type)
}
registrationRequest := RegistrationRequest{
WorkloadID: workloadConfig.WorkloadID,
LaunchMeasurement: measurement,
TeeConfig: string(teeConfigBytes),
Passphrase: diskEncryptionPassphrase,
}
registrationRequestBytes, err := json.Marshal(registrationRequest)
if err != nil {
return err
}
// Register the workload.
parsedURL, err := url.Parse(workloadConfig.AttestationURL)
if err != nil {
return err
}
parsedURL.Path = path.Join(parsedURL.Path, "/kbs/v0/register_workload")
if err != nil {
return err
}
url := parsedURL.String()
requestContentType := "application/json"
requestBody := bytes.NewReader(registrationRequestBytes)
defer http.DefaultClient.CloseIdleConnections()
resp, err := http.Post(url, requestContentType, requestBody)
if resp != nil {
if resp.Body != nil {
resp.Body.Close()
}
switch resp.StatusCode {
default:
if !ignoreAttestationErrors {
return &attestationError{&httpError{resp.StatusCode}}
}
logger.Warn(attestationError{&httpError{resp.StatusCode}}.Error())
case http.StatusOK, http.StatusAccepted:
// great!
}
}
if err != nil {
if !ignoreAttestationErrors {
return &attestationError{err}
}
logger.Warn(attestationError{err}.Error())
}
return nil
}
// GenerateMeasurement generates the runtime measurement using the CPU count,
// memory size, and the firmware shared library, whatever it's called, wherever
// it is.
// If firmwareLibrary is a path, it will be the only one checked.
// If firmwareLibrary is a filename, it will be checked for in a hard-coded set
// of directories.
// If firmwareLibrary is empty, both the filename and the directory it is in
// will be taken from a hard-coded set of candidates.
func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) (string, error) {
cpuString := fmt.Sprintf("%d", workloadConfig.CPUs)
memoryString := fmt.Sprintf("%d", workloadConfig.Memory)
var prefix string
switch workloadConfig.Type {
case SEV:
prefix = "SEV-ES"
case SEV_NO_ES:
prefix = "SEV"
case SNP:
prefix = "SNP"
default:
return "", fmt.Errorf("don't know which measurement to use for TEE type %q", workloadConfig.Type)
}
sharedLibraryDirs := []string{
"/usr/local/lib64",
"/usr/local/lib",
"/lib64",
"/lib",
"/usr/lib64",
"/usr/lib",
}
if llp, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
sharedLibraryDirs = append(sharedLibraryDirs, strings.Split(llp, ":")...)
}
libkrunfwNames := []string{
"libkrunfw-sev.so.4",
"libkrunfw-sev.so.3",
"libkrunfw-sev.so",
}
var pathsToCheck []string
if firmwareLibrary == "" {
for _, sharedLibraryDir := range sharedLibraryDirs {
if sharedLibraryDir == "" {
continue
}
for _, libkrunfw := range libkrunfwNames {
candidate := filepath.Join(sharedLibraryDir, libkrunfw)
pathsToCheck = append(pathsToCheck, candidate)
}
}
} else {
if filepath.IsAbs(firmwareLibrary) {
pathsToCheck = append(pathsToCheck, firmwareLibrary)
} else {
for _, sharedLibraryDir := range sharedLibraryDirs {
if sharedLibraryDir == "" {
continue
}
candidate := filepath.Join(sharedLibraryDir, firmwareLibrary)
pathsToCheck = append(pathsToCheck, candidate)
}
}
}
for _, candidate := range pathsToCheck {
if _, err := os.Lstat(candidate); err == nil {
var stdout, stderr bytes.Buffer
logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate)
cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
if stderr.Len() > 0 {
err = fmt.Errorf("krunfw_measurement: %s: %w", strings.TrimSpace(stderr.String()), err)
}
return "", err
}
scanner := bufio.NewScanner(&stdout)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, prefix+":") {
return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil
}
}
return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":")
}
}
return "", fmt.Errorf("generating measurement: none of %v found: %w", pathsToCheck, os.ErrNotExist)
}
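For orientation, a caller in this package might drive it roughly as follows — a minimal sketch; the WorkloadConfig values are illustrative and krunfw_measurement must be installed on the host:
wc := WorkloadConfig{Type: SNP, CPUs: 2, Memory: 512}
// An empty firmwareLibrary means: search the hard-coded directories for the libkrunfw-sev candidates.
measurement, err := GenerateMeasurement(wc, "")
if err != nil {
return err
}
logrus.Debugf("launch measurement: %s", measurement)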

Binary file not shown.

View File

@ -0,0 +1,6 @@
package mkcw
import _ "embed"
//go:embed "embed/entrypoint.gz"
var entrypointCompressedBytes []byte

View File

@ -0,0 +1,51 @@
package mkcw
import (
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"github.com/containers/luksy"
)
// CheckLUKSPassphrase checks that the specified LUKS-encrypted file can be
// decrypted using the specified passphrase.
func CheckLUKSPassphrase(path, decryptionPassphrase string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
v1header, v2headerA, v2headerB, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
if err != nil {
return err
}
if v1header != nil {
_, _, _, _, err = v1header.Decrypt(decryptionPassphrase, f)
return err
}
if v2headerA == nil && v2headerB == nil {
return fmt.Errorf("no LUKS headers read from %q", path)
}
if v2headerA != nil {
if _, _, _, _, err = v2headerA.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
return err
}
}
if v2headerB != nil {
if _, _, _, _, err = v2headerB.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
return err
}
}
return nil
}
// GenerateDiskEncryptionPassphrase generates a random disk encryption passphrase
func GenerateDiskEncryptionPassphrase() (string, error) {
randomizedBytes := make([]byte, 32)
if _, err := rand.Read(randomizedBytes); err != nil {
return "", err
}
return hex.EncodeToString(randomizedBytes), nil
}
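A typical round-trip with these two helpers might look like this sketch — the disk path is hypothetical, and the LUKS-encryption step itself (done elsewhere via luksy) is elided:
passphrase, err := GenerateDiskEncryptionPassphrase()
if err != nil {
return err
}
// ... create and LUKS-encrypt disk.img using this passphrase ...
if err := CheckLUKSPassphrase("disk.img", passphrase); err != nil {
return fmt.Errorf("passphrase did not round-trip: %w", err)
}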

View File

@ -0,0 +1,38 @@
package mkcw
import (
"fmt"
"os/exec"
"strings"
"github.com/sirupsen/logrus"
)
// MakeFS formats the imageFile as a filesystem of the specified type,
// populating it with the contents of the directory at sourcePath.
// Recognized filesystem types are "ext2", "ext3", "ext4", and "btrfs".
// Note that krun's init is currently hard-wired to assume "ext4".
// Returns the stdout, stderr, and any error returned by the mkfs command.
func MakeFS(sourcePath, imageFile, filesystem string) (string, string, error) {
var stdout, stderr strings.Builder
// N.B. mkfs.xfs can accept a protofile via its -p option, but the
// protofile format doesn't allow us to supply timestamp information or
// specify that files are hard linked
switch filesystem {
case "ext2", "ext3", "ext4":
logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
cmd := exec.Command("mkfs", "-t", filesystem, "-d", sourcePath, imageFile)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
return stdout.String(), stderr.String(), err
case "btrfs":
logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
cmd := exec.Command("mkfs", "-t", filesystem, "--rootdir", sourcePath, imageFile)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
return stdout.String(), stderr.String(), err
}
return "", "", fmt.Errorf("don't know how to make a %q filesystem with contents", filesystem)
}
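Usage is a single call; in this sketch the paths are hypothetical, and per the note above, krun's init currently expects "ext4":
stdout, stderr, err := MakeFS("/tmp/rootfs", "/tmp/disk.img", "ext4")
if err != nil {
logrus.Errorf("mkfs failed: %v (stderr: %q)", err, strings.TrimSpace(stderr))
}
_ = stdout // mkfs output is usually only interesting on failure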

View File

@ -0,0 +1,47 @@
package types
// RegistrationRequest is the body of the request which we use for registering
// this confidential workload with the attestation server.
// https://github.com/virtee/reference-kbs/blob/10b2a4c0f8caf78a077210b172863bbae54f66aa/src/main.rs#L83
type RegistrationRequest struct {
WorkloadID string `json:"workload_id"`
LaunchMeasurement string `json:"launch_measurement"`
Passphrase string `json:"passphrase"`
TeeConfig string `json:"tee_config"` // JSON-encoded teeConfig? or specific to the type of TEE?
}
// TeeConfig contains information about a trusted execution environment.
type TeeConfig struct {
Flags TeeConfigFlags `json:"flags"` // runtime requirement bits
MinFW TeeConfigMinFW `json:"minfw"` // minimum platform firmware version
}
// TeeConfigFlags is a bit field containing policy flags specific to the environment.
// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/sev.rs#L172
// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/snp.rs#L114
type TeeConfigFlags struct {
Bits TeeConfigFlagBits `json:"bits"`
}
// TeeConfigFlagBits are bits representing run-time expectations.
type TeeConfigFlagBits int
const (
SEV_CONFIG_NO_DEBUG TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming no debugging of guests
SEV_CONFIG_NO_KEY_SHARING TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming no sharing keys between guests
SEV_CONFIG_ENCRYPTED_STATE TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming requires SEV-ES
SEV_CONFIG_NO_SEND TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming no transferring the guest to another platform
SEV_CONFIG_DOMAIN TeeConfigFlagBits = 0b00010000 //revive:disable-line:var-naming no transferring the guest out of the domain (?)
SEV_CONFIG_SEV TeeConfigFlagBits = 0b00100000 //revive:disable-line:var-naming no transferring the guest to non-SEV platforms
SNP_CONFIG_SMT TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming SMT is enabled on the host machine
SNP_CONFIG_MANDATORY TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming reserved bit which should always be set
SNP_CONFIG_MIGRATE_MA TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming allowed to use a migration agent
SNP_CONFIG_DEBUG TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming allow debugging
)
// TeeConfigMinFW corresponds to a minimum version of the kernel+initrd
// combination that should be booted.
type TeeConfigMinFW struct {
Major int `json:"major"`
Minor int `json:"minor"`
}
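These constants are meant to be OR-ed together into a policy; for example, the plain-SEV (no SEV-ES) case composed earlier in this diff works out to:
cbits := SEV_CONFIG_NO_DEBUG |
SEV_CONFIG_NO_KEY_SHARING |
SEV_CONFIG_NO_SEND |
SEV_CONFIG_DOMAIN |
SEV_CONFIG_SEV // 0b00111011: every restriction except requiring SEV-ES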

View File

@ -0,0 +1,34 @@
package types
import "github.com/containers/buildah/define"
// WorkloadConfig is the data type which is encoded and stored in /krun-sev.json in a container
// image, and included directly in the disk image.
// https://github.com/containers/libkrun/blob/57c59dc5359bdeeb8260b3493e9f63d3708f9ab9/src/vmm/src/resources.rs#L57
type WorkloadConfig struct {
Type define.TeeType `json:"tee"`
TeeData string `json:"tee_data"` // Type == SEV: JSON-encoded SevWorkloadData, SNP: JSON-encoded SnpWorkloadData, others?
WorkloadID string `json:"workload_id"`
CPUs int `json:"cpus"`
Memory int `json:"ram_mib"`
AttestationURL string `json:"attestation_url"`
}
// SevWorkloadData contains the path to the SEV certificate chain and optionally,
// the attestation server's public key(?)
// https://github.com/containers/libkrun/blob/d31747aa92cf83df2abaeb87e2a83311c135d003/src/vmm/src/linux/tee/amdsev.rs#L222
type SevWorkloadData struct {
VendorChain string `json:"vendor_chain"`
AttestationServerPubkey string `json:"attestation_server_pubkey"`
}
// SnpWorkloadData contains the required CPU generation name.
// https://github.com/virtee/oci2cw/blob/1502d5be33c2fa82d49aaa95781bbab2aa932781/examples/tee-config-snp.json
type SnpWorkloadData struct {
Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo=4)
}
const (
// SEV_NO_ES is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization without encrypted state, requires epyc 1000 "naples")
SEV_NO_ES define.TeeType = "sev_no_es" //revive:disable-line:var-naming
)

View File

@ -0,0 +1,223 @@
package mkcw
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal/mkcw/types"
)
type (
// WorkloadConfig is the data type which is encoded and stored in an image.
WorkloadConfig = types.WorkloadConfig
// SevWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SEV.
SevWorkloadData = types.SevWorkloadData
// SnpWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SNP.
SnpWorkloadData = types.SnpWorkloadData
// TeeType is one of the known types of trusted execution environments for which we
// can generate suitable image contents.
TeeType = define.TeeType
)
const (
maxWorkloadConfigSize = 1024 * 1024
preferredPaddingBoundary = 4096
// SEV is a known trusted execution environment type: AMD-SEV
SEV = define.SEV
// SEV_NO_ES is a known trusted execution environment type: AMD-SEV without encrypted state
SEV_NO_ES = types.SEV_NO_ES //revive:disable-line:var-naming
// SNP is a known trusted execution environment type: AMD-SNP
SNP = define.SNP
// krun looks for its configuration JSON directly in a disk image if the last twelve bytes
// of the disk image are this magic value followed by a little-endian 64-bit
// length-of-the-configuration
krunMagic = "KRUN"
)
// ReadWorkloadConfigFromImage reads the workload configuration from the
// specified disk image file
func ReadWorkloadConfigFromImage(path string) (WorkloadConfig, error) {
// Read the last 12 bytes, which should be "KRUN" followed by a 64-bit
// little-endian length. The (length) bytes immediately preceding
// these hold the JSON-encoded workloadConfig.
var wc WorkloadConfig
f, err := os.Open(path)
if err != nil {
return wc, err
}
defer f.Close()
// Read those last 12 bytes.
finalTwelve := make([]byte, 12)
if _, err = f.Seek(-12, io.SeekEnd); err != nil {
return wc, fmt.Errorf("checking for workload config signature: %w", err)
}
if n, err := f.Read(finalTwelve); err != nil || n != len(finalTwelve) {
if err != nil && !errors.Is(err, io.EOF) {
return wc, fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
}
if n != len(finalTwelve) {
return wc, fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", path, n)
}
}
if magic := string(finalTwelve[0:4]); magic != "KRUN" {
return wc, fmt.Errorf("expected magic string KRUN in %q, found %q)", path, magic)
}
length := binary.LittleEndian.Uint64(finalTwelve[4:])
if length > maxWorkloadConfigSize {
return wc, fmt.Errorf("workload config in %q is %d bytes long, which seems unreasonable (max allowed %d)", path, length, maxWorkloadConfigSize)
}
// Read and decode the config.
configBytes := make([]byte, length)
if _, err = f.Seek(-(int64(length) + 12), io.SeekEnd); err != nil {
return wc, fmt.Errorf("looking for workload config from disk image: %w", err)
}
if n, err := f.Read(configBytes); err != nil || n != len(configBytes) {
if err != nil {
return wc, fmt.Errorf("reading workload config from disk image: %w", err)
}
return wc, fmt.Errorf("short read (expected %d bytes near the end of %q, got %d)", len(configBytes), path, n)
}
err = json.Unmarshal(configBytes, &wc)
if err != nil {
err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err)
}
return wc, err
}
// WriteWorkloadConfigToImage writes the workload configuration to the
// specified disk image file, overwriting a previous configuration if it's
// asked to and it finds one
func WriteWorkloadConfigToImage(imageFile *os.File, workloadConfigBytes []byte, overwrite bool) error {
// Read those last 12 bytes to check if there's a configuration there already, which we should overwrite.
var overwriteOffset int64
if overwrite {
finalTwelve := make([]byte, 12)
if _, err := imageFile.Seek(-12, io.SeekEnd); err != nil {
return fmt.Errorf("checking for workload config signature: %w", err)
}
if n, err := imageFile.Read(finalTwelve); err != nil || n != len(finalTwelve) {
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
}
if n != len(finalTwelve) {
return fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", imageFile.Name(), n)
}
}
if magic := string(finalTwelve[0:4]); magic == "KRUN" {
length := binary.LittleEndian.Uint64(finalTwelve[4:])
if length < maxWorkloadConfigSize {
overwriteOffset = int64(length + 12)
}
}
}
// If we found a configuration in the file, try to figure out how much padding was used.
paddingSize := int64(preferredPaddingBoundary)
if overwriteOffset != 0 {
st, err := imageFile.Stat()
if err != nil {
return err
}
for _, possiblePaddingLength := range []int64{0x100000, 0x10000, 0x1000, 0x200, 0x100} {
if overwriteOffset > possiblePaddingLength {
continue
}
if st.Size()%possiblePaddingLength != 0 {
continue
}
if _, err := imageFile.Seek(-possiblePaddingLength, io.SeekEnd); err != nil {
return fmt.Errorf("checking size of padding at end of file: %w", err)
}
buf := make([]byte, possiblePaddingLength)
n, err := imageFile.Read(buf)
if err != nil {
return fmt.Errorf("reading possible padding at end of file: %w", err)
}
if n != len(buf) {
return fmt.Errorf("short read checking size of padding at end of file: %d != %d", n, len(buf))
}
if bytes.Equal(buf[:possiblePaddingLength-overwriteOffset], make([]byte, possiblePaddingLength-overwriteOffset)) {
// everything up to the configuration was zero bytes, so it was padding
overwriteOffset = possiblePaddingLength
paddingSize = possiblePaddingLength
break
}
}
}
// Append the krun configuration to a new buffer.
var formatted bytes.Buffer
nWritten, err := formatted.Write(workloadConfigBytes)
if err != nil {
return fmt.Errorf("building workload config: %w", err)
}
if nWritten != len(workloadConfigBytes) {
return fmt.Errorf("short write appending configuration to buffer: %d != %d", nWritten, len(workloadConfigBytes))
}
// Append the magic string to the buffer.
nWritten, err = formatted.WriteString(krunMagic)
if err != nil {
return fmt.Errorf("building workload config signature: %w", err)
}
if nWritten != len(krunMagic) {
return fmt.Errorf("short write appending krun magic to buffer: %d != %d", nWritten, len(krunMagic))
}
// Append the 64-bit little-endian length of the workload configuration to the buffer.
workloadConfigLengthBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(workloadConfigLengthBytes, uint64(len(workloadConfigBytes)))
nWritten, err = formatted.Write(workloadConfigLengthBytes)
if err != nil {
return fmt.Errorf("building workload config signature size: %w", err)
}
if nWritten != len(workloadConfigLengthBytes) {
return fmt.Errorf("short write appending configuration length to buffer: %d != %d", nWritten, len(workloadConfigLengthBytes))
}
// Build a copy of that data, with padding preceding it.
var padded bytes.Buffer
if int64(formatted.Len())%paddingSize != 0 {
extra := paddingSize - (int64(formatted.Len()) % paddingSize)
nWritten, err := padded.Write(make([]byte, extra))
if err != nil {
return fmt.Errorf("buffering padding: %w", err)
}
if int64(nWritten) != extra {
return fmt.Errorf("short write buffering padding for disk image: %d != %d", nWritten, extra)
}
}
extra := int64(formatted.Len())
nWritten, err = padded.Write(formatted.Bytes())
if err != nil {
return fmt.Errorf("buffering workload config: %w", err)
}
if int64(nWritten) != extra {
return fmt.Errorf("short write buffering workload config: %d != %d", nWritten, extra)
}
// Write the buffer to the file, starting with padding.
if _, err = imageFile.Seek(-overwriteOffset, io.SeekEnd); err != nil {
return fmt.Errorf("preparing to write workload config: %w", err)
}
nWritten, err = imageFile.Write(padded.Bytes())
if err != nil {
return fmt.Errorf("writing workload config: %w", err)
}
if nWritten != padded.Len() {
return fmt.Errorf("short write writing configuration to disk image: %d != %d", nWritten, padded.Len())
}
offset, err := imageFile.Seek(0, io.SeekCurrent)
if err != nil {
return fmt.Errorf("preparing mark end of disk image: %w", err)
}
if err = imageFile.Truncate(offset); err != nil {
return fmt.Errorf("marking end of disk image: %w", err)
}
return nil
}
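To summarize the trailer format the reader and writer above agree on — a short sketch of locating the configuration, assuming f is the already-opened image file:
// layout: [ padding ][ JSON workload config ][ "KRUN" ][ 8-byte little-endian length of the JSON ]
trailer := make([]byte, 12)
if _, err := f.Seek(-12, io.SeekEnd); err == nil {
if _, err := io.ReadFull(f, trailer); err == nil {
magic := string(trailer[:4]) // expected to be "KRUN"
length := binary.LittleEndian.Uint64(trailer[4:]) // size of the JSON blob preceding the trailer
_, _ = magic, length
}
}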

View File

@ -1,449 +1,15 @@
package parse
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"errors"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
)
const (
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs = "tmpfs"
// TypeCache is the type for mounting a common persistent cache from host
TypeCache = "cache"
// mount=type=cache must create a persistent directory on the host so it's available for all consecutive builds.
// The lifecycle of this directory is inherited from how the host machine treats its temporary directory
BuildahCacheDir = "buildah-cache"
// mount=type=cache allows users to lock a cache store while it's being used by another build
BuildahCacheLockfile = "buildah-cache-lockfile"
// All the lockfiles are stored in a separate directory inside `BuildahCacheDir`
// Example `/var/tmp/buildah-cache/<target>/buildah-cache-lockfile`
BuildahCacheLockfileDir = "buildah-cache-lockfiles"
)
var (
errBadMntOption = errors.New("invalid mount option")
errBadOptionArg = errors.New("must provide an argument for option")
errBadVolDest = errors.New("must set volume destination")
errBadVolSrc = errors.New("must set volume source")
errDuplicateDest = errors.New("duplicate mount destination")
)
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns the parsed mount and the name of the image we mounted, or an empty string if no image was mounted.
// The caller is expected to unmount any mounted images.
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) {
newMount := specs.Mount{
Type: define.TypeBind,
}
setRelabel := false
mountReadability := false
setDest := false
bindNonRecursive := false
fromImage := ""
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
bindNonRecursive = true
case "ro", "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
mountReadability = true
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
mountReadability = true
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
mountReadability = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
targetPath := kv[1]
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
return newMount, "", err
}
newMount.Destination = targetPath
setDest = true
case "relabel":
if setRelabel {
return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
}
setRelabel = true
if len(kv) != 2 {
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
}
switch kv[1] {
case "private":
newMount.Options = append(newMount.Options, "Z")
case "shared":
newMount.Options = append(newMount.Options, "z")
default:
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
}
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
// default mount readability is always readonly
if !mountReadability {
newMount.Options = append(newMount.Options, "ro")
}
// The following variable ensures that we return the image name only if we performed an additional mount
isImageMounted := false
if fromImage != "" {
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromImage]; ok {
mountPoint = val.MountPoint
}
}
// if mountPoint of image was not found in additionalMap
// or additionalMap was nil, try mounting image
if mountPoint == "" {
image, err := internalUtil.LookupImage(ctx, store, fromImage)
if err != nil {
return newMount, "", err
}
mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
if err != nil {
return newMount, "", err
}
isImageMounted = true
}
contextDir = mountPoint
}
// buildkit parity: default bind option must be `rbind`
// unless specified
if !bindNonRecursive {
newMount.Options = append(newMount.Options, "rbind")
}
if !setDest {
return newMount, fromImage, errBadVolDest
}
// buildkit parity: support absolute path for sources from current build context
if contextDir != "" {
// path should be /contextDir/specified path
newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// looks like it's coming from `buildah run --mount=type=bind`; allow using an absolute path
// and error out if no source is set
if newMount.Source == "" {
return newMount, "", errBadVolSrc
}
if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
return newMount, "", err
}
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, fromImage, err
}
newMount.Options = opts
if !isImageMounted {
// we don't want any cleanup if the image was not explicitly mounted here,
// so don't return a name
fromImage = ""
}
return newMount, fromImage, nil
}
// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
func CleanCacheMount() error {
cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
return os.RemoveAll(cacheParent)
}
// GetCacheMount parses a single cache mount entry from the --mount flag.
//
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
var err error
var mode uint64
var buildahLockFilesDir string
var (
setDest bool
setShared bool
setReadOnly bool
foundSElinuxLabel bool
)
fromStage := ""
newMount := specs.Mount{
Type: define.TypeBind,
}
// if id is set, a new subdirectory named `id` will be created under /host-temp/buildah-build-cache/id
id := ""
// buildkit parity: cache directory mode defaults to 0o755
mode = 0o755
// buildkit parity: cache directory defaults to uid 0 if not specified
uid := 0
// buildkit parity: cache directory defaults to gid 0 if not specified
gid := 0
// sharing mode
sharing := "shared"
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
case "readonly", "ro":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
setReadOnly = true
case "Z", "z":
newMount.Options = append(newMount.Options, kv[0])
foundSElinuxLabel = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U":
newMount.Options = append(newMount.Options, kv[0])
setShared = true
case "sharing":
sharing = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "id":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
id = kv[1]
case "from":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromStage = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
targetPath := kv[1]
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
return newMount, nil, err
}
newMount.Destination = targetPath
setDest = true
case "src", "source":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "mode":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
}
case "uid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
uid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
}
case "gid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
gid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
}
default:
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
// If SELinux is enabled and no SELinux option was configured,
// default to `z`, i.e. a shared content label.
if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" {
newMount.Options = append(newMount.Options, "z")
}
if !setDest {
return newMount, nil, errBadVolDest
}
if fromStage != "" {
// do not create cache on host
// instead use read-only mounted stage as cache
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromStage]; ok {
if val.IsStage {
mountPoint = val.MountPoint
}
}
}
// Cache does not support using an image, so if no stage was found,
// return an error.
if mountPoint == "" {
return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
}
// path should be /contextDir/specified path
newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// We need to create the cache on the host if no stage is being used as the source,
// since type=cache contents can be reused by consecutive builds.
// Create a common cache directory, which persists on the host within its temp-directory lifecycle,
// and add a subdirectory if one was specified.
// The cache parent directory is created separately for each user.
cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
}
if id != "" {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
} else {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
}
idPair := idtools.IDPair{
UID: uid,
GID: gid,
}
// buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
}
// create a subdirectory inside `cacheParent` just to store lockfiles
buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
}
}
var targetLock *lockfile.LockFile // = nil
succeeded := false
defer func() {
if !succeeded && targetLock != nil {
targetLock.Unlock()
}
}()
switch sharing {
case "locked":
// lock parent cache
lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
if err != nil {
return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
}
// Will be unlocked after the RUN step is executed.
lockfile.Lock()
targetLock = lockfile
case "shared":
// do nothing since default is `shared`
break
default:
// error out for unknown values
return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
}
// buildkit parity: default sharing should be shared
// unless specified
if !setShared {
newMount.Options = append(newMount.Options, "shared")
}
// buildkit parity: cache must be writable unless `ro` or `readonly` is configured explicitly
if !setReadOnly {
newMount.Options = append(newMount.Options, "rw")
}
newMount.Options = append(newMount.Options, "bind")
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, nil, err
}
newMount.Options = opts
succeeded = true
return newMount, targetLock, nil
}
// ValidateVolumeMountHostDir validates the host path of buildah --volume
func ValidateVolumeMountHostDir(hostDir string) error {
if !filepath.IsAbs(hostDir) {
@ -484,22 +50,6 @@ func SplitStringWithColonEscape(str string) []string {
return result
}
func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
finalVolumeMounts := make(map[string]specs.Mount)
for _, volume := range volumes {
volumeMount, err := Volume(volume)
if err != nil {
return nil, err
}
if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
}
finalVolumeMounts[volumeMount.Destination] = volumeMount
}
return finalVolumeMounts, nil
}
// Volume parses the input of --volume
func Volume(volume string) (specs.Mount, error) {
mount := specs.Mount{}
@ -527,178 +77,3 @@ func Volume(volume string) (specs.Mount, error) {
mount.Options = mountOpts
return mount, nil
}
// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
func UnlockLockArray(locks []*lockfile.LockFile) {
for _, lock := range locks {
lock.Unlock()
}
}
// GetVolumes gets the volumes from --volume and --mount
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
succeeded := false
defer func() {
if !succeeded {
UnlockLockArray(targetLocks)
}
}()
volumeMounts, err := getVolumeMounts(volumes)
if err != nil {
return nil, mountedImages, nil, err
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = mount
}
finalMounts := make([]specs.Mount, 0, len(unifiedMounts))
for _, mount := range unifiedMounts {
finalMounts = append(finalMounts, mount)
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
}
// getMounts takes user-provided input from the --mount flag and creates OCI
// spec mounts.
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
// If `type` is not set default to "bind"
mountType := define.TypeBind
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
targetLocks := make([]*lockfile.LockFile, 0)
succeeded := false
defer func() {
if !succeeded {
UnlockLockArray(targetLocks)
}
}()
errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
// TODO(vrothberg): the manual parsing can be replaced with a regular expression
// to allow a more robust parsing of the mount format and to give
// precise errors regarding supported format versus supported options.
for _, mount := range mounts {
tokens := strings.Split(mount, ",")
if len(tokens) < 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
for _, field := range tokens {
if strings.HasPrefix(field, "type=") {
kv := strings.Split(field, "=")
if len(kv) != 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
mountType = kv[1]
}
}
switch mountType {
case define.TypeBind:
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
mountedImages = append(mountedImages, image)
case TypeCache:
mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
if tl != nil {
targetLocks = append(targetLocks, tl)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
mount, err := GetTmpfsMount(tokens)
if err != nil {
return nil, mountedImages, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
default:
return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
}
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
}
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount := specs.Mount{
Type: TypeTmpfs,
Source: TypeTmpfs,
}
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "tmpcopyup":
// the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
return newMount, errors.New("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
default:
return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
if !setDest {
return newMount, errBadVolDest
}
return newMount, nil
}

View File

@ -0,0 +1,28 @@
package tmpdir
import (
"os"
"path/filepath"
"github.com/containers/common/pkg/config"
"github.com/sirupsen/logrus"
)
// GetTempDir returns base for a temporary directory on host.
func GetTempDir() string {
if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
abs, err := filepath.Abs(tmpdir)
if err == nil {
return abs
}
logrus.Warnf("ignoring TMPDIR from environment, evaluating it: %v", err)
}
containerConfig, err := config.Default()
if err == nil {
tmpdir, err := containerConfig.ImageCopyTmpDir()
if err == nil {
return tmpdir
}
}
return "/var/tmp"
}
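So the lookup order is TMPDIR (made absolute), then the containers.conf image-copy temporary directory, then /var/tmp. A hypothetical caller:
base := tmpdir.GetTempDir() // e.g. TMPDIR=work/tmp yields an absolute path under the current directory
cachePath := filepath.Join(base, "buildah-cache")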

View File

@ -8,7 +8,6 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@ -50,21 +49,6 @@ func NormalizePlatform(platform v1.Platform) v1.Platform {
}
}
// GetTempDir returns base for a temporary directory on host.
func GetTempDir() string {
if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
return tmpdir
}
containerConfig, err := config.Default()
if err == nil {
tmpdir, err := containerConfig.ImageCopyTmpDir()
if err == nil {
return tmpdir
}
}
return "/var/tmp"
}
// ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout.
func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
var err error

View File

@ -0,0 +1,637 @@
package volumes
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"errors"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/internal/tmpdir"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
)
const (
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs = "tmpfs"
// TypeCache is the type for mounting a common persistent cache from host
TypeCache = "cache"
// mount=type=cache must create a persistent directory on the host so it's available for all consecutive builds.
// The lifecycle of this directory is inherited from how the host machine treats its temporary directory
buildahCacheDir = "buildah-cache"
// mount=type=cache allows users to lock a cache store while it's being used by another build
BuildahCacheLockfile = "buildah-cache-lockfile"
// All the lockfiles are stored in a separate directory inside `BuildahCacheDir`
// Example `/var/tmp/buildah-cache/<target>/buildah-cache-lockfile`
BuildahCacheLockfileDir = "buildah-cache-lockfiles"
)
var (
errBadMntOption = errors.New("invalid mount option")
errBadOptionArg = errors.New("must provide an argument for option")
errBadVolDest = errors.New("must set volume destination")
errBadVolSrc = errors.New("must set volume source")
errDuplicateDest = errors.New("duplicate mount destination")
)
// CacheParent returns a cache parent for --mount=type=cache
func CacheParent() string {
return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
}
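For example (the suffix comes from unshare.GetRootlessUID, so the exact values are illustrative):
parent := CacheParent() // e.g. /var/tmp/buildah-cache-0 for root, /var/tmp/buildah-cache-1000 for UID 1000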
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns the parsed mount and the name of the image we mounted, or an empty string if no image was mounted.
// The caller is expected to unmount any mounted images.
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) {
newMount := specs.Mount{
Type: define.TypeBind,
}
setRelabel := false
mountReadability := false
setDest := false
bindNonRecursive := false
fromImage := ""
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
bindNonRecursive = true
case "ro", "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
mountReadability = true
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
mountReadability = true
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
mountReadability = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
targetPath := kv[1]
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
return newMount, "", err
}
newMount.Destination = targetPath
setDest = true
case "relabel":
if setRelabel {
return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
}
setRelabel = true
if len(kv) != 2 {
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
}
switch kv[1] {
case "private":
newMount.Options = append(newMount.Options, "Z")
case "shared":
newMount.Options = append(newMount.Options, "z")
default:
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
}
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
// default mount readability is always readonly
if !mountReadability {
newMount.Options = append(newMount.Options, "ro")
}
// The following variable ensures that we return the image name only if we performed an additional mount
isImageMounted := false
if fromImage != "" {
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromImage]; ok {
mountPoint = val.MountPoint
}
}
// if mountPoint of image was not found in additionalMap
// or additionalMap was nil, try mounting image
if mountPoint == "" {
image, err := internalUtil.LookupImage(ctx, store, fromImage)
if err != nil {
return newMount, "", err
}
mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
if err != nil {
return newMount, "", err
}
isImageMounted = true
}
contextDir = mountPoint
}
// buildkit parity: default bind option must be `rbind`
// unless specified
if !bindNonRecursive {
newMount.Options = append(newMount.Options, "rbind")
}
if !setDest {
return newMount, fromImage, errBadVolDest
}
// buildkit parity: support absolute path for sources from current build context
if contextDir != "" {
// path should be /contextDir/specified path
newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// looks like it's coming from `buildah run --mount=type=bind`; allow using an absolute path
// and error out if no source is set
if newMount.Source == "" {
return newMount, "", errBadVolSrc
}
if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
return newMount, "", err
}
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, fromImage, err
}
newMount.Options = opts
if !isImageMounted {
// we don't want any cleanup if the image was not explicitly mounted here,
// so don't return a name
fromImage = ""
}
return newMount, fromImage, nil
}
// GetCacheMount parses a single cache mount entry from the --mount flag.
//
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
var err error
var mode uint64
var buildahLockFilesDir string
var (
setDest bool
setShared bool
setReadOnly bool
foundSElinuxLabel bool
)
fromStage := ""
newMount := specs.Mount{
Type: define.TypeBind,
}
// if id is set, a new subdirectory named `id` will be created under /host-temp/buildah-build-cache/id
id := ""
// buildkit parity: cache directory mode defaults to 0o755
mode = 0o755
// buildkit parity: cache directory defaults to uid 0 if not specified
uid := 0
// buildkit parity: cache directory defaults to gid 0 if not specified
gid := 0
// sharing mode
sharing := "shared"
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
case "readonly", "ro":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
setReadOnly = true
case "Z", "z":
newMount.Options = append(newMount.Options, kv[0])
foundSElinuxLabel = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U":
newMount.Options = append(newMount.Options, kv[0])
setShared = true
case "sharing":
sharing = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
case "id":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
id = kv[1]
case "from":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromStage = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
targetPath := kv[1]
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
return newMount, nil, err
}
newMount.Destination = targetPath
setDest = true
case "src", "source":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "mode":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
}
case "uid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
uid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
}
case "gid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
gid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
}
default:
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
// If SELinux is enabled and no SELinux option was configured,
// default to `z`, i.e. a shared content label.
if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" {
newMount.Options = append(newMount.Options, "z")
}
if !setDest {
return newMount, nil, errBadVolDest
}
if fromStage != "" {
// do not create cache on host
// instead use read-only mounted stage as cache
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromStage]; ok {
if val.IsStage {
mountPoint = val.MountPoint
}
}
}
// Cache does not support using an image, so if no stage was found,
// return an error.
if mountPoint == "" {
return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
}
// path should be /contextDir/specified path
newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// We need to create the cache on the host if no stage is being used as the source,
// since type=cache contents can be reused by consecutive builds.
// Create a common cache directory, which persists on the host within its temp-directory lifecycle,
// and add a subdirectory if one was specified.
// The cache parent directory is created separately for each user.
cacheParent := CacheParent()
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
}
if id != "" {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
} else {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
}
idPair := idtools.IDPair{
UID: uid,
GID: gid,
}
// buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
}
// create a subdirectory inside `cacheParent` just to store lockfiles
buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
}
}
var targetLock *lockfile.LockFile // = nil
succeeded := false
defer func() {
if !succeeded && targetLock != nil {
targetLock.Unlock()
}
}()
switch sharing {
case "locked":
// lock parent cache
lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
if err != nil {
return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
}
// Will be unlocked after the RUN step is executed.
lockfile.Lock()
targetLock = lockfile
case "shared":
// do nothing since default is `shared`
break
default:
// error out for unknown values
return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
}
// buildkit parity: default sharing should be shared
// unless specified
if !setShared {
newMount.Options = append(newMount.Options, "shared")
}
// buildkit parity: cache must be writable unless `ro` or `readonly` is configured explicitly
if !setReadOnly {
newMount.Options = append(newMount.Options, "rw")
}
newMount.Options = append(newMount.Options, "bind")
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, nil, err
}
newMount.Options = opts
succeeded = true
return newMount, targetLock, nil
}
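A sketch of how one --mount=type=cache specification flows through this parser — the store value and the option string are placeholders:
args := strings.Split("type=cache,target=/go/pkg/mod,id=gomod,sharing=locked", ",")
mount, lock, err := GetCacheMount(args, store, "", nil, "/")
if err != nil {
return err
}
if lock != nil {
defer lock.Unlock() // in real use this is unlocked after the RUN step finishes
}
_ = mount // a bind mount whose source lives under CacheParent()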
func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
finalVolumeMounts := make(map[string]specs.Mount)
for _, volume := range volumes {
volumeMount, err := internalParse.Volume(volume)
if err != nil {
return nil, err
}
if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
}
finalVolumeMounts[volumeMount.Destination] = volumeMount
}
return finalVolumeMounts, nil
}
// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
func UnlockLockArray(locks []*lockfile.LockFile) {
for _, lock := range locks {
lock.Unlock()
}
}
// GetVolumes gets the volumes from --volume and --mount
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
succeeded := false
defer func() {
if !succeeded {
UnlockLockArray(targetLocks)
}
}()
volumeMounts, err := getVolumeMounts(volumes)
if err != nil {
return nil, mountedImages, nil, err
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = mount
}
finalMounts := make([]specs.Mount, 0, len(unifiedMounts))
for _, mount := range unifiedMounts {
finalMounts = append(finalMounts, mount)
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
}
// getMounts takes user-provided input from the --mount flag and creates OCI
// spec mounts.
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
// If `type` is not set default to "bind"
mountType := define.TypeBind
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
targetLocks := make([]*lockfile.LockFile, 0)
succeeded := false
defer func() {
if !succeeded {
UnlockLockArray(targetLocks)
}
}()
errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
// TODO(vrothberg): the manual parsing can be replaced with a regular expression
// to allow a more robust parsing of the mount format and to give
// precise errors regarding supported format versus supported options.
for _, mount := range mounts {
tokens := strings.Split(mount, ",")
if len(tokens) < 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
for _, field := range tokens {
if strings.HasPrefix(field, "type=") {
kv := strings.Split(field, "=")
if len(kv) != 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
mountType = kv[1]
}
}
switch mountType {
case define.TypeBind:
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
mountedImages = append(mountedImages, image)
case TypeCache:
mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
if err != nil {
return nil, mountedImages, nil, err
}
if tl != nil {
targetLocks = append(targetLocks, tl)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
mount, err := GetTmpfsMount(tokens)
if err != nil {
return nil, mountedImages, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
default:
return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
}
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
}
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount := specs.Mount{
Type: TypeTmpfs,
Source: TypeTmpfs,
}
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "type":
// This is already processed
continue
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "tmpcopyup":
// the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
return newMount, errors.New("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
default:
return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
if !setDest {
return newMount, errBadVolDest
}
return newMount, nil
}
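A usage sketch for GetTmpfsMount (assumed caller code, not in this commit), feeding it the comma-split tokens of a --mount value:
// Hypothetical caller, assuming this package is imported as "volumes".
tokens := strings.Split("type=tmpfs,target=/dev/shm,tmpfs-size=64m,ro", ",")
mount, err := volumes.GetTmpfsMount(tokens)
if err != nil {
	return err
}
// mount.Type == "tmpfs", mount.Destination == "/dev/shm",
// mount.Options == []string{"size=64m", "ro"} (options follow token order)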

View File

@ -92,6 +92,19 @@ type lookupGroupEntry struct {
user string
}
func scanWithoutComments(rc *bufio.Scanner) (string, bool) {
for {
if !rc.Scan() {
return "", false
}
line := rc.Text()
if strings.HasPrefix(strings.TrimSpace(line), "#") {
continue
}
return line, true
}
}
func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry {
if !rc.Scan() {
return nil
@ -118,10 +131,13 @@ func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry {
}
func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry {
if !rc.Scan() {
// On FreeBSD, /etc/group may contain comments:
// https://man.freebsd.org/cgi/man.cgi?query=group&sektion=5&format=html
// We need to ignore those lines rather than trying to parse them.
line, ok := scanWithoutComments(rc)
if !ok {
return nil
}
line := rc.Text()
fields := strings.Split(line, ":")
if len(fields) != 4 {
return nil
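To make the comment-skipping behavior concrete, a test-style sketch within this package (illustrative, not part of the commit):
// Hypothetical example: "#"-prefixed lines are skipped, as in a
// FreeBSD-style /etc/group file.
sc := bufio.NewScanner(strings.NewReader("# comments allowed here\nwheel:*:0:root\n"))
if line, ok := scanWithoutComments(sc); ok {
	fmt.Println(line) // prints "wheel:*:0:root"
}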

View File

@ -296,6 +296,13 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
iopts.Quiet = true
}
}
var confidentialWorkloadOptions define.ConfidentialWorkloadOptions
if c.Flag("cw").Changed {
confidentialWorkloadOptions, err = parse.GetConfidentialWorkloadOptions(iopts.CWOptions)
if err != nil {
return options, nil, nil, err
}
}
var cacheTo []reference.Named
var cacheFrom []reference.Named
cacheTo = nil
@ -364,6 +371,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
CacheTTL: cacheTTL,
CNIConfigDir: iopts.CNIConfigDir,
CNIPluginPath: iopts.CNIPlugInPath,
ConfidentialWorkload: confidentialWorkloadOptions,
CPPFlags: iopts.CPPFlags,
CommonBuildOpts: commonOpts,
Compression: compression,
@ -383,6 +391,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
Isolation: isolation,
Jobs: &iopts.Jobs,
Labels: iopts.Label,
LayerLabels: iopts.LayerLabel,
Layers: layers,
LogFile: iopts.Logfile,
LogRusage: iopts.LogRusage,

View File

@ -72,6 +72,7 @@ type BudResults struct {
From string
Iidfile string
Label []string
LayerLabel []string
Logfile string
LogSplitByPlatform bool
Manifest string
@ -106,6 +107,7 @@ type BudResults struct {
Envs []string
OSFeatures []string
OSVersion string
CWOptions string
}
// FromAndBudResults represents the results for common flags
@ -216,6 +218,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
fs.StringVarP(&flags.CWOptions, "cw", "", "", "confidential workload `options`")
fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP")
fs.StringArrayVar(&flags.Envs, "env", []string{}, "set environment variable for the image")
@ -226,6 +229,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])")
fs.StringArrayVar(&flags.LayerLabel, "layer-label", []string{}, "set metadata for an intermediate image (default [])")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.BoolVar(&flags.LogSplitByPlatform, "logsplit", false, "split logfile to different files for each platform")
fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
@ -297,6 +301,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
flagCompletion["cpp-flag"] = commonComp.AutocompleteNone
flagCompletion["creds"] = commonComp.AutocompleteNone
flagCompletion["cw"] = commonComp.AutocompleteNone
flagCompletion["env"] = commonComp.AutocompleteNone
flagCompletion["file"] = commonComp.AutocompleteDefault
flagCompletion["format"] = commonComp.AutocompleteNone
@ -306,6 +311,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
flagCompletion["jobs"] = commonComp.AutocompleteNone
flagCompletion["label"] = commonComp.AutocompleteNone
flagCompletion["layer-label"] = commonComp.AutocompleteNone
flagCompletion["logfile"] = commonComp.AutocompleteDefault
flagCompletion["manifest"] = commonComp.AutocompleteDefault
flagCompletion["os"] = commonComp.AutocompleteNone

View File

@ -16,9 +16,11 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
internalParse "github.com/containers/buildah/internal/parse"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/common/pkg/auth"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/docker/reference"
@ -68,11 +70,6 @@ func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
return result, nil
}
// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
func CleanCacheMount() error {
return internalParse.CleanCacheMount()
}
// CommonBuildOptions parses the build options from the bud cli
func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
@ -449,9 +446,13 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
func getAuthFile(authfile string) string {
if authfile != "" {
return authfile
absAuthfile, err := filepath.Abs(authfile)
if err == nil {
return absAuthfile
}
logrus.Warnf("ignoring passed-in auth file path, evaluating it: %v", err)
}
return os.Getenv("REGISTRY_AUTH_FILE")
return auth.GetDefaultAuthFile()
}
// PlatformFromOptions parses the operating system (os) and architecture (arch)
@ -635,6 +636,76 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
}
// GetConfidentialWorkloadOptions parses a confidential workload settings
// argument, which controls both whether or not we produce an image that
// expects to be run using krun, and how we handle things like encrypting
// the disk image that the container image will contain.
func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOptions, error) {
options := define.ConfidentialWorkloadOptions{
TempDir: GetTempDir(),
}
defaults := options
for _, option := range strings.Split(arg, ",") {
var err error
switch {
case strings.HasPrefix(option, "type="):
options.TeeType = define.TeeType(strings.ToLower(strings.TrimPrefix(option, "type=")))
switch options.TeeType {
case define.SEV, define.SNP, mkcwtypes.SEV_NO_ES:
default:
return options, fmt.Errorf("parsing type= value %q: unrecognized value", options.TeeType)
}
case strings.HasPrefix(option, "attestation_url="), strings.HasPrefix(option, "attestation-url="):
options.Convert = true
options.AttestationURL = strings.TrimPrefix(option, "attestation_url=")
if options.AttestationURL == option {
options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
}
case strings.HasPrefix(option, "passphrase="), strings.HasPrefix(option, "passphrase="):
options.Convert = true
options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
options.WorkloadID = strings.TrimPrefix(option, "workload_id=")
if options.WorkloadID == option {
options.WorkloadID = strings.TrimPrefix(option, "workload-id=")
}
case strings.HasPrefix(option, "cpus="):
options.CPUs, err = strconv.Atoi(strings.TrimPrefix(option, "cpus="))
if err != nil {
return options, fmt.Errorf("parsing cpus= value %q: %w", strings.TrimPrefix(option, "cpus="), err)
}
case strings.HasPrefix(option, "memory="):
options.Memory, err = strconv.Atoi(strings.TrimPrefix(option, "memory="))
if err != nil {
return options, fmt.Errorf("parsing memory= value %q: %w", strings.TrimPrefix(option, "memorys"), err)
}
case option == "ignore_attestation_errors", option == "ignore-attestation-errors":
options.IgnoreAttestationErrors = true
case strings.HasPrefix(option, "ignore_attestation_errors="), strings.HasPrefix(option, "ignore-attestation-errors="):
val := strings.TrimPrefix(option, "ignore_attestation_errors=")
if val == option {
val = strings.TrimPrefix(option, "ignore-attestation-errors=")
}
options.IgnoreAttestationErrors = val == "true" || val == "yes" || val == "on" || val == "1"
case strings.HasPrefix(option, "firmware-library="), strings.HasPrefix(option, "firmware_library="):
val := strings.TrimPrefix(option, "firmware-library=")
if val == option {
val = strings.TrimPrefix(option, "firmware_library=")
}
options.FirmwareLibrary = val
case strings.HasPrefix(option, "slop="):
options.Slop = strings.TrimPrefix(option, "slop=")
default:
knownOptions := []string{"type", "attestation_url", "passphrase", "workload_id", "cpus", "memory", "firmware_library", "slop"}
return options, fmt.Errorf("expected one or more of %q as arguments for --cw, not %q", knownOptions, option)
}
}
if options != defaults && !options.Convert {
return options, fmt.Errorf("--cw arguments missing one or more of (%q, %q)", "passphrase", "attestation_url")
}
return options, nil
}
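A hedged example of the argument string this parser accepts (illustrative values; the URL and sizes are assumptions):
// Hypothetical call: request conversion for SEV with an attestation server.
opts, err := parse.GetConfidentialWorkloadOptions(
	"type=SEV,attestation_url=https://kbs.example.com,cpus=2,memory=512")
if err != nil {
	return err
}
// opts.Convert == true, opts.TeeType == define.SEV,
// opts.AttestationURL == "https://kbs.example.com"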
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
@ -997,7 +1068,7 @@ func isValidDeviceMode(mode string) bool {
}
func GetTempDir() string {
return internalUtil.GetTempDir()
return tmpdir.GetTempDir()
}
// Secrets parses the --secret flag

View File

@ -201,8 +201,13 @@ func NewSource(paths []string) (*Source, error) {
if len(paths) == 0 {
socket = os.Getenv("SSH_AUTH_SOCK")
if socket == "" {
return nil, errors.New("$SSH_AUTH_SOCK not set")
return nil, errors.New("SSH_AUTH_SOCK not set in environment")
}
absSocket, err := filepath.Abs(socket)
if err != nil {
return nil, fmt.Errorf("evaluating SSH_AUTH_SOCK in environment: %w", err)
}
socket = absSocket
}
for _, p := range paths {
if socket != "" {

View File

@ -0,0 +1,38 @@
//go:build linux || freebsd || darwin
// +build linux freebsd darwin
package util
import (
"fmt"
"syscall"
"github.com/docker/go-units"
)
func ParseUlimit(ulimit string) (*units.Ulimit, error) {
ul, err := units.ParseUlimit(ulimit)
if err != nil {
return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
}
if ul.Hard != -1 && ul.Soft == -1 {
return ul, nil
}
rl, err := ul.GetRlimit()
if err != nil {
return nil, err
}
var limit syscall.Rlimit
if err := syscall.Getrlimit(rl.Type, &limit); err != nil {
return nil, err
}
if ul.Soft == -1 {
ul.Soft = int64(limit.Cur)
}
if ul.Hard == -1 {
ul.Hard = int64(limit.Max)
}
return ul, nil
}
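A usage sketch (assumed caller, not part of the diff): a -1 soft or hard value is resolved against the process's current rlimit via Getrlimit.
// Hypothetical example: fill in "nofile=-1:-1" from the current limits.
ul, err := util.ParseUlimit("nofile=-1:-1")
if err != nil {
	return err
}
fmt.Printf("nofile soft=%d hard=%d\n", ul.Soft, ul.Hard)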

View File

@ -0,0 +1,16 @@
package util
import (
"fmt"
"github.com/docker/go-units"
)
func ParseUlimit(ulimit string) (*units.Ulimit, error) {
ul, err := units.ParseUlimit(ulimit)
if err != nil {
return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
}
return ul, nil
}

View File

@ -0,0 +1,13 @@
package volumes
import (
"os"
"github.com/containers/buildah/internal/volumes"
)
// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
func CleanCacheMount() error {
cacheParent := volumes.CacheParent()
return os.RemoveAll(cacheParent)
}

View File

@ -95,6 +95,10 @@ type PushOptions struct {
CompressionFormat *compression.Algorithm
// CompressionLevel specifies what compression level is used
CompressionLevel *int
// ForceCompressionFormat ensures that the compression algorithm set in
// CompressionFormat is used exclusively, and blobs of other compression
// algorithms are not reused.
ForceCompressionFormat bool
}
// Push copies the contents of the image to a new location.
@ -110,6 +114,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
libimageOptions.OciEncryptLayers = options.OciEncryptLayers
libimageOptions.CompressionFormat = options.CompressionFormat
libimageOptions.CompressionLevel = options.CompressionLevel
libimageOptions.ForceCompressionFormat = options.ForceCompressionFormat
libimageOptions.PolicyAllowStorage = true
if options.Quiet {

View File

@ -26,8 +26,8 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalParse "github.com/containers/buildah/internal/parse"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/internal/volumes"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/buildah/util"
@ -1358,7 +1358,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
succeeded := false
defer func() {
if !succeeded {
internalParse.UnlockLockArray(mountArtifacts.TargetLocks)
volumes.UnlockLockArray(mountArtifacts.TargetLocks)
}
}()
// Add temporary copies of the contents of volume locations at the
@ -1522,7 +1522,7 @@ func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources
succeeded := false
defer func() {
if !succeeded {
internalParse.UnlockLockArray(targetLocks)
volumes.UnlockLockArray(targetLocks)
}
}()
for _, mount := range mounts {
@ -1626,7 +1626,7 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex
return nil, "", errors.New("Context Directory for current run invocation is not configured")
}
var optionMounts []specs.Mount
mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
mount, image, err := volumes.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
if err != nil {
return nil, image, err
}
@ -1640,7 +1640,7 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex
func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) {
var optionMounts []specs.Mount
mount, err := internalParse.GetTmpfsMount(tokens)
mount, err := volumes.GetTmpfsMount(tokens)
if err != nil {
return nil, err
}
@ -1953,7 +1953,7 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint
}
}
// unlock if any locked files from this RUN statement
internalParse.UnlockLockArray(artifacts.TargetLocks)
volumes.UnlockLockArray(artifacts.TargetLocks)
return prevErr
}

View File

@ -19,6 +19,7 @@ import (
"github.com/containers/buildah/pkg/jail"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
butil "github.com/containers/buildah/pkg/util"
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/resolvconf"
nettypes "github.com/containers/common/libnetwork/types"
@ -559,7 +560,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string)
ulimit = append(defaultUlimits, ulimit...)
for _, u := range ulimit {
if ul, err = units.ParseUlimit(u); err != nil {
if ul, err = butil.ParseUlimit(u); err != nil {
return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
}

View File

@ -19,9 +19,10 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/internal/volumes"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
butil "github.com/containers/buildah/pkg/util"
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/pasta"
"github.com/containers/common/libnetwork/resolvconf"
@ -873,7 +874,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string)
ulimit = append(defaultUlimits, ulimit...)
for _, u := range ulimit {
if ul, err = units.ParseUlimit(u); err != nil {
if ul, err = butil.ParseUlimit(u); err != nil {
return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
}
@ -1253,7 +1254,7 @@ func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must eventually unlock it once the cache mount is no longer in use.
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) {
var optionMounts []specs.Mount
mount, targetLock, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
if err != nil {
return nil, nil, err
}

View File

@ -5,32 +5,9 @@ package util
import (
"os"
"sync"
"syscall"
)
type hardlinkDeviceAndInode struct {
device, inode uint64
}
type HardlinkChecker struct {
hardlinks sync.Map
}
func (h *HardlinkChecker) Check(fi os.FileInfo) string {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" {
return name.(string)
}
}
return ""
}
func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
}
}
func UID(st os.FileInfo) int {
return int(st.Sys().(*syscall.Stat_t).Uid)
}

View File

@ -1,3 +1,4 @@
//go:build !linux && !darwin
// +build !linux,!darwin
package util
@ -6,15 +7,6 @@ import (
"os"
)
type HardlinkChecker struct {
}
func (h *HardlinkChecker) Check(fi os.FileInfo) string {
return ""
}
func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
}
func UID(st os.FileInfo) int {
return 0
}

View File

@ -1,4 +1,4 @@
package version
// Version is the version of the build.
const Version = "0.56.0-dev"
const Version = "0.56.0"

View File

@ -6,12 +6,12 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality added in a backwards-compatible manner
VersionMinor = 27
VersionMinor = 28
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = ""
)
// Version is the specification version that the package types support.

16
vendor/github.com/containers/luksy/.cirrus.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
docker_builder:
name: CI
env:
HOME: /root
DEBIAN_FRONTEND: noninteractive
setup_script: |
apt-get -q update
apt-get -q install -y bats cryptsetup golang
go version
make
unit_test_script:
go test -v -cover
defaults_script: |
bats -f defaults ./tests
aes_script: |
bats -f aes ./tests

2
vendor/github.com/containers/luksy/.dockerignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
lukstool
lukstool.test

21
vendor/github.com/containers/luksy/.gitignore generated vendored Normal file
View File

@ -0,0 +1,21 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work

7
vendor/github.com/containers/luksy/Dockerfile generated vendored Normal file
View File

@ -0,0 +1,7 @@
FROM registry.fedoraproject.org/fedora
RUN dnf -y install golang make
WORKDIR /go/src/github.com/containers/luksy/
COPY / /go/src/github.com/containers/luksy/
RUN make clean all
FROM registry.fedoraproject.org/fedora-minimal
COPY --from=0 /go/src/github.com/containers/luksy/ /usr/local/bin/

201
vendor/github.com/containers/luksy/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

14
vendor/github.com/containers/luksy/Makefile generated vendored Normal file
View File

@ -0,0 +1,14 @@
GO = go
BATS = bats
all: luksy
luksy: cmd/luksy/*.go *.go
$(GO) build -o luksy ./cmd/luksy
clean:
$(RM) luksy luksy.test
test:
$(GO) test
$(BATS) ./tests

10
vendor/github.com/containers/luksy/README.md generated vendored Normal file
View File

@ -0,0 +1,10 @@
luksy: offline encryption/decryption using LUKS formats [![Cirrus CI Status](https://img.shields.io/cirrus/github/containers/luksy/main)](https://cirrus-ci.com/github/containers/luksy/main)
-
luksy implements encryption and decryption using LUKSv1 and LUKSv2 formats.
Think of it as a clunkier cousin of gzip/bzip2/xz that doesn't actually produce
smaller output than input, but it encrypts, and that's nice.
* The main goal is to be able to encrypt/decrypt when we don't have access to
the Linux device mapper. Duplicating functions of cryptsetup that it can
perform without accessing the Linux device mapper is not a priority.
* If you can use cryptsetup instead, use cryptsetup instead.

244
vendor/github.com/containers/luksy/decrypt.go generated vendored Normal file
View File

@ -0,0 +1,244 @@
package luksy
import (
"bytes"
"errors"
"fmt"
"os"
"strconv"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
)
// Decrypt attempts to verify the specified password using information from the
// header and read from the specified file.
//
// Returns a function which will decrypt payload blocks in succession, the size
// of chunks of data that the function expects, the offset in the file where
// the payload begins, and the size of the payload.
func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, error), int, int64, int64, error) {
st, err := f.Stat()
if err != nil {
return nil, -1, -1, -1, err
}
hasher, err := hasherByName(h.HashSpec())
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", h.HashSpec(), err)
}
activeKeys := 0
for k := 0; k < v1NumKeys; k++ {
keyslot, err := h.KeySlot(k)
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("reading key slot %d: %w", k, err)
}
active, err := keyslot.Active()
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("checking if key slot %d is active: %w", k, err)
}
if !active {
continue
}
activeKeys++
passwordDerived := pbkdf2.Key([]byte(password), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher)
striped := make([]byte, h.KeyBytes()*keyslot.Stripes())
n, err := f.ReadAt(striped, int64(keyslot.KeyMaterialOffset())*V1SectorSize)
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %d: %w", k, err)
}
if n != len(striped) {
return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %d: expected %d, got %d", k, len(striped), n)
}
splitKey, err := v1decrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, striped, V1SectorSize, false)
if err != nil {
fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err)
continue
}
mkCandidate, err := afMerge(splitKey, hasher(), int(h.KeyBytes()), int(keyslot.Stripes()))
if err != nil {
fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err)
continue
}
mkcandidateDerived := pbkdf2.Key(mkCandidate, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher)
ivTweak := 0
decryptStream := func(ciphertext []byte) ([]byte, error) {
plaintext, err := v1decrypt(h.CipherName(), h.CipherMode(), ivTweak, mkCandidate, ciphertext, V1SectorSize, false)
ivTweak += len(ciphertext) / V1SectorSize
return plaintext, err
}
if bytes.Equal(mkcandidateDerived, h.MKDigest()) {
payloadOffset := int64(h.PayloadOffset() * V1SectorSize)
return decryptStream, V1SectorSize, payloadOffset, st.Size() - payloadOffset, nil
}
}
if activeKeys == 0 {
return nil, -1, -1, -1, errors.New("no passwords set on LUKS1 volume")
}
return nil, -1, -1, -1, errors.New("decryption error: incorrect password")
}
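A sketch of how a caller might drive the returned decryption function (hdr, password, and f are assumed to be a parsed V1Header, the candidate password, and the opened volume file; not part of this commit):
// Hypothetical caller: stream-decrypt the LUKS1 payload chunk by chunk.
decrypt, blockSize, payloadOffset, payloadSize, err := hdr.Decrypt(password, f)
if err != nil {
	return err
}
buf := make([]byte, blockSize)
for off := int64(0); off < payloadSize; off += int64(blockSize) {
	n, err := f.ReadAt(buf, payloadOffset+off)
	if err != nil && err != io.EOF {
		return err
	}
	plaintext, err := decrypt(buf[:n])
	if err != nil {
		return err
	}
	_ = plaintext // write the plaintext to its destination here
}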
// Decrypt attempts to verify the specified password using information from the
// header, JSON block, and read from the specified file.
//
// Returns a function which will decrypt payload blocks in succession, the size
// of chunks of data that the function expects, the offset in the file where
// the payload begins, and the size of the payload.
func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) {
foundDigests := 0
for d, digest := range j.Digests {
if digest.Type != "pbkdf2" {
continue
}
if digest.V2JSONDigestPbkdf2 == nil {
return nil, -1, -1, -1, fmt.Errorf("digest %q is corrupt: no pbkdf2 parameters", d)
}
foundDigests++
if len(digest.Segments) == 0 || len(digest.Digest) == 0 {
continue
}
payloadOffset := int64(-1)
payloadSectorSize := V1SectorSize
payloadEncryption := ""
payloadSize := int64(0)
ivTweak := 0
for _, segmentID := range digest.Segments {
segment, ok := j.Segments[segmentID]
if !ok {
continue // well, that was misleading
}
if segment.Type != "crypt" {
continue
}
tmp, err := strconv.ParseInt(segment.Offset, 10, 64)
if err != nil {
continue
}
payloadOffset = tmp
if segment.Size == "dynamic" {
st, err := f.Stat()
if err != nil {
continue
}
payloadSize = st.Size() - payloadOffset
} else {
payloadSize, err = strconv.ParseInt(segment.Size, 10, 64)
if err != nil {
continue
}
}
payloadSectorSize = segment.SectorSize
payloadEncryption = segment.Encryption
ivTweak = segment.IVTweak
break
}
if payloadEncryption == "" {
continue
}
activeKeys := 0
for k, keyslot := range j.Keyslots {
if keyslot.Priority != nil && *keyslot.Priority == V2JSONKeyslotPriorityIgnore {
continue
}
applicable := true
if len(digest.Keyslots) > 0 {
applicable = false
for i := 0; i < len(digest.Keyslots); i++ {
if k == digest.Keyslots[i] {
applicable = true
break
}
}
}
if !applicable {
continue
}
if keyslot.Type != "luks2" {
continue
}
if keyslot.V2JSONKeyslotLUKS2 == nil {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt", k)
}
if keyslot.V2JSONKeyslotLUKS2.AF.Type != "luks1" {
continue
}
if keyslot.V2JSONKeyslotLUKS2.AF.V2JSONAFLUKS1 == nil {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no AF parameters", k)
}
if keyslot.Area.Type != "raw" {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is not raw", k)
}
if keyslot.Area.KeySize*V2SectorSize < keyslot.KeySize*keyslot.AF.Stripes {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is too small (%d < %d)", k, keyslot.Area.KeySize*V2SectorSize, keyslot.KeySize*keyslot.AF.Stripes)
}
var passwordDerived []byte
switch keyslot.V2JSONKeyslotLUKS2.Kdf.Type {
default:
continue
case "pbkdf2":
if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfPbkdf2 == nil {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no pbkdf2 parameters", k)
}
hasher, err := hasherByName(keyslot.Kdf.Hash)
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.Kdf.Hash, err)
}
passwordDerived = pbkdf2.Key([]byte(password), keyslot.Kdf.Salt, keyslot.Kdf.Iterations, keyslot.KeySize, hasher)
case "argon2i":
if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2i parameters", k)
}
passwordDerived = argon2.Key([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize))
case "argon2id":
if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil {
return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2id parameters", k)
}
passwordDerived = argon2.IDKey([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize))
}
striped := make([]byte, keyslot.KeySize*keyslot.AF.Stripes)
n, err := f.ReadAt(striped, int64(keyslot.Area.Offset))
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %q: %w", k, err)
}
if n != len(striped) {
return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %q: expected %d, got %d", k, len(striped), n)
}
splitKey, err := v2decrypt(keyslot.Area.Encryption, 0, passwordDerived, striped, V1SectorSize, false)
if err != nil {
fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err)
continue
}
afhasher, err := hasherByName(keyslot.AF.Hash)
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.AF.Hash, err)
}
mkCandidate, err := afMerge(splitKey, afhasher(), int(keyslot.KeySize), int(keyslot.AF.Stripes))
if err != nil {
fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err)
continue
}
digester, err := hasherByName(digest.Hash)
if err != nil {
return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", digest.Hash, err)
}
mkcandidateDerived := pbkdf2.Key(mkCandidate, digest.Salt, digest.Iterations, len(digest.Digest), digester)
decryptStream := func(ciphertext []byte) ([]byte, error) {
plaintext, err := v2decrypt(payloadEncryption, ivTweak, mkCandidate, ciphertext, payloadSectorSize, true)
ivTweak += len(ciphertext) / payloadSectorSize
return plaintext, err
}
if bytes.Equal(mkcandidateDerived, digest.Digest) {
return decryptStream, payloadSectorSize, payloadOffset, payloadSize, nil
}
activeKeys++
}
if activeKeys == 0 {
return nil, -1, -1, -1, fmt.Errorf("no passwords set on LUKS2 volume for digest %q", d)
}
}
if foundDigests == 0 {
return nil, -1, -1, -1, errors.New("no usable password-verification digests set on LUKS2 volume")
}
return nil, -1, -1, -1, errors.New("decryption error: incorrect password")
}

421
vendor/github.com/containers/luksy/encrypt.go generated vendored Normal file
View File

@ -0,0 +1,421 @@
package luksy
import (
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"github.com/google/uuid"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
)
// EncryptV1 prepares to encrypt data using one or more passwords and the
// specified cipher (or a default, if the specified cipher is "").
//
// Returns a fixed LUKSv1 header which contains keying information, a function
// which will encrypt blocks of data in succession, and the size of chunks of
// data that it expects.
func EncryptV1(password []string, cipher string) ([]byte, func([]byte) ([]byte, error), int, error) {
if len(password) == 0 {
return nil, nil, -1, errors.New("at least one password is required")
}
if len(password) > v1NumKeys {
return nil, nil, -1, fmt.Errorf("attempted to use %d passwords, only %d possible", len(password), v1NumKeys)
}
if cipher == "" {
cipher = "aes-xts-plain64"
}
salt := make([]byte, v1SaltSize)
n, err := rand.Read(salt)
if err != nil {
return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
}
if n != len(salt) {
return nil, nil, -1, errors.New("short read")
}
cipherSpec := strings.SplitN(cipher, "-", 3)
if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 {
return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher)
}
var h V1Header
if err := h.SetMagic(V1Magic); err != nil {
return nil, nil, -1, fmt.Errorf("setting magic to v1: %w", err)
}
if err := h.SetVersion(1); err != nil {
return nil, nil, -1, fmt.Errorf("setting version to 1: %w", err)
}
h.SetCipherName(cipherSpec[0])
h.SetCipherMode(cipherSpec[1] + "-" + cipherSpec[2])
h.SetHashSpec("sha256")
h.SetKeyBytes(32)
if cipherSpec[1] == "xts" {
h.SetKeyBytes(64)
}
h.SetMKDigestSalt(salt)
h.SetMKDigestIter(V1Stripes)
h.SetUUID(uuid.NewString())
mkey := make([]byte, h.KeyBytes())
n, err = rand.Read(mkey)
if err != nil {
return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
}
if n != len(mkey) {
return nil, nil, -1, errors.New("short read")
}
hasher, err := hasherByName(h.HashSpec())
if err != nil {
return nil, nil, -1, errors.New("internal error")
}
mkdigest := pbkdf2.Key(mkey, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher)
h.SetMKDigest(mkdigest)
headerLength := roundUpToMultiple(v1HeaderStructSize, V1AlignKeyslots)
iterations := IterationsPBKDF2(salt, int(h.KeyBytes()), hasher)
var stripes [][]byte
ksSalt := make([]byte, v1KeySlotSaltLength)
for i := 0; i < v1NumKeys; i++ {
n, err = rand.Read(ksSalt)
if err != nil {
return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
}
if n != len(ksSalt) {
return nil, nil, -1, errors.New("short read")
}
var keyslot V1KeySlot
keyslot.SetActive(i < len(password))
keyslot.SetIterations(uint32(iterations))
keyslot.SetStripes(V1Stripes)
keyslot.SetKeySlotSalt(ksSalt)
if i < len(password) {
splitKey, err := afSplit(mkey, hasher(), int(h.MKDigestIter()))
if err != nil {
return nil, nil, -1, fmt.Errorf("splitting key: %w", err)
}
passwordDerived := pbkdf2.Key([]byte(password[i]), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher)
striped, err := v1encrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, splitKey, V1SectorSize, false)
if err != nil {
return nil, nil, -1, fmt.Errorf("encrypting split key with password: %w", err)
}
if len(striped) != len(mkey)*int(keyslot.Stripes()) {
return nil, nil, -1, fmt.Errorf("internal error: got %d stripe bytes, expected %d", len(striped), len(mkey)*int(keyslot.Stripes()))
}
stripes = append(stripes, striped)
}
keyslot.SetKeyMaterialOffset(uint32(headerLength / V1SectorSize))
if err := h.SetKeySlot(i, keyslot); err != nil {
return nil, nil, -1, fmt.Errorf("internal error: setting value for key slot %d: %w", i, err)
}
headerLength += len(mkey) * int(keyslot.Stripes())
headerLength = roundUpToMultiple(headerLength, V1AlignKeyslots)
}
headerLength = roundUpToMultiple(headerLength, V1SectorSize)
h.SetPayloadOffset(uint32(headerLength / V1SectorSize))
head := make([]byte, headerLength)
offset := copy(head, h[:])
offset = roundUpToMultiple(offset, V1AlignKeyslots)
for _, stripe := range stripes {
copy(head[offset:], stripe)
offset = roundUpToMultiple(offset+len(stripe), V1AlignKeyslots)
}
ivTweak := 0
encryptStream := func(plaintext []byte) ([]byte, error) {
ciphertext, err := v1encrypt(h.CipherName(), h.CipherMode(), ivTweak, mkey, plaintext, V1SectorSize, true)
ivTweak += len(plaintext) / V1SectorSize
return ciphertext, err
}
return head, encryptStream, V1SectorSize, nil
}
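The encryption side, as a matching sketch (out is an assumed io.Writer; illustrative only):
// Hypothetical caller: write the LUKS1 header, then encrypt in blockSize units.
header, encrypt, blockSize, err := luksy.EncryptV1([]string{"s3kr3t"}, "")
if err != nil {
	return err
}
if _, err := out.Write(header); err != nil {
	return err
}
chunk := make([]byte, blockSize) // plaintext, padded to a full sector at EOF
ciphertext, err := encrypt(chunk)
if err != nil {
	return err
}
_, err = out.Write(ciphertext)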
// EncryptV2 prepares to encrypt data using one or more passwords and the
// specified cipher (or a default, if the specified cipher is "").
//
// Returns a fixed LUKSv2 header which contains keying information, a
// function which will encrypt blocks of data in succession, and the size of
// chunks of data that it expects.
func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte, func([]byte) ([]byte, error), int, error) {
if len(password) == 0 {
return nil, nil, -1, errors.New("at least one password is required")
}
if cipher == "" {
cipher = "aes-xts-plain64"
}
cipherSpec := strings.SplitN(cipher, "-", 3)
if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 {
return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher)
}
if payloadSectorSize == 0 {
payloadSectorSize = V2SectorSize
}
switch payloadSectorSize {
default:
return nil, nil, -1, fmt.Errorf("invalid sector size %d", payloadSectorSize)
case 512, 1024, 2048, 4096:
}
headerSalts := make([]byte, v1SaltSize*3)
n, err := rand.Read(headerSalts)
if err != nil {
return nil, nil, -1, err
}
if n != len(headerSalts) {
return nil, nil, -1, errors.New("short read")
}
hSalt1 := headerSalts[:v1SaltSize]
hSalt2 := headerSalts[v1SaltSize : v1SaltSize*2]
mkeySalt := headerSalts[v1SaltSize*2:]
roundHeaderSize := func(size int) (int, error) {
switch {
case size < 0x4000:
return 0x4000, nil
case size < 0x8000:
return 0x8000, nil
case size < 0x10000:
return 0x10000, nil
case size < 0x20000:
return 0x20000, nil
case size < 0x40000:
return 0x40000, nil
case size < 0x80000:
return 0x80000, nil
case size < 0x100000:
return 0x100000, nil
case size < 0x200000:
return 0x200000, nil
case size < 0x400000:
return 0x400000, nil
}
return 0, fmt.Errorf("internal error: unsupported header size %d", size)
}
var h1, h2 V2Header
if err := h1.SetMagic(V2Magic1); err != nil {
return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err)
}
if err := h2.SetMagic(V2Magic2); err != nil {
return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err)
}
if err := h1.SetVersion(2); err != nil {
return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err)
}
if err := h2.SetVersion(2); err != nil {
return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err)
}
h1.SetSequenceID(1)
h2.SetSequenceID(1)
h1.SetLabel("")
h2.SetLabel("")
h1.SetChecksumAlgorithm("sha256")
h2.SetChecksumAlgorithm("sha256")
h1.SetSalt(hSalt1)
h2.SetSalt(hSalt2)
uuidString := uuid.NewString()
h1.SetUUID(uuidString)
h2.SetUUID(uuidString)
h1.SetHeaderOffset(0)
h2.SetHeaderOffset(0)
h1.SetChecksum(nil)
h2.SetChecksum(nil)
mkey := make([]byte, 32)
if cipherSpec[1] == "xts" {
mkey = make([]byte, 64)
}
n, err = rand.Read(mkey)
if err != nil {
return nil, nil, -1, fmt.Errorf("reading random data: %w", err)
}
if n != len(mkey) {
return nil, nil, -1, errors.New("short read")
}
tuningSalt := make([]byte, v1SaltSize)
hasher, err := hasherByName(h1.ChecksumAlgorithm())
if err != nil {
return nil, nil, -1, errors.New("internal error")
}
iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher)
timeCost := 1
threadsCost := 4
memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost)
priority := V2JSONKeyslotPriorityNormal
var stripes [][]byte
var keyslots []V2JSONKeyslot
mdigest := pbkdf2.Key(mkey, mkeySalt, iterations, len(hasher().Sum([]byte{})), hasher)
digest0 := V2JSONDigest{
Type: "pbkdf2",
Salt: mkeySalt,
Digest: mdigest,
Segments: []string{"0"},
V2JSONDigestPbkdf2: &V2JSONDigestPbkdf2{
Hash: h1.ChecksumAlgorithm(),
Iterations: iterations,
},
}
for i := range password {
keyslotSalt := make([]byte, v1SaltSize)
n, err := rand.Read(keyslotSalt)
if err != nil {
return nil, nil, -1, err
}
if n != len(keyslotSalt) {
return nil, nil, -1, errors.New("short read")
}
key := argon2.Key([]byte(password[i]), keyslotSalt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(len(mkey)))
split, err := afSplit(mkey, hasher(), V2Stripes)
if err != nil {
return nil, nil, -1, fmt.Errorf("splitting: %w", err)
}
striped, err := v2encrypt(cipher, 0, key, split, V1SectorSize, false)
if err != nil {
return nil, nil, -1, fmt.Errorf("encrypting: %w", err)
}
stripes = append(stripes, striped)
keyslot := V2JSONKeyslot{
Type: "luks2",
KeySize: len(mkey),
Area: V2JSONArea{
Type: "raw",
Offset: 10000000, // gets updated later
Size: int64(roundUpToMultiple(len(striped), V2AlignKeyslots)),
V2JSONAreaRaw: &V2JSONAreaRaw{
Encryption: cipher,
KeySize: len(key),
},
},
Priority: &priority,
V2JSONKeyslotLUKS2: &V2JSONKeyslotLUKS2{
AF: V2JSONAF{
Type: "luks1",
V2JSONAFLUKS1: &V2JSONAFLUKS1{
Stripes: V2Stripes,
Hash: h1.ChecksumAlgorithm(),
},
},
Kdf: V2JSONKdf{
Type: "argon2i",
Salt: keyslotSalt,
V2JSONKdfArgon2i: &V2JSONKdfArgon2i{
Time: timeCost,
Memory: memoryCost,
CPUs: threadsCost,
},
},
},
}
keyslots = append(keyslots, keyslot)
digest0.Keyslots = append(digest0.Keyslots, strconv.Itoa(i))
}
segment0 := V2JSONSegment{
Type: "crypt",
Offset: "10000000", // gets updated later
Size: "dynamic",
V2JSONSegmentCrypt: &V2JSONSegmentCrypt{
IVTweak: 0,
Encryption: cipher,
SectorSize: payloadSectorSize,
},
}
j := V2JSON{
Config: V2JSONConfig{},
Keyslots: map[string]V2JSONKeyslot{},
Digests: map[string]V2JSONDigest{},
Segments: map[string]V2JSONSegment{},
Tokens: map[string]V2JSONToken{},
}
rebuild:
j.Digests["0"] = digest0
j.Segments["0"] = segment0
encodedJSON, err := json.Marshal(j)
if err != nil {
return nil, nil, -1, err
}
headerPlusPaddedJsonSize, err := roundHeaderSize(int(V2SectorSize) /* binary header */ + len(encodedJSON) + 1)
if err != nil {
return nil, nil, -1, err
}
if j.Config.JsonSize != headerPlusPaddedJsonSize-V2SectorSize {
j.Config.JsonSize = headerPlusPaddedJsonSize - V2SectorSize
goto rebuild
}
if h1.HeaderSize() != uint64(headerPlusPaddedJsonSize) {
h1.SetHeaderSize(uint64(headerPlusPaddedJsonSize))
h2.SetHeaderSize(uint64(headerPlusPaddedJsonSize))
h1.SetHeaderOffset(0)
h2.SetHeaderOffset(uint64(headerPlusPaddedJsonSize))
goto rebuild
}
keyslotsOffset := h2.HeaderOffset() * 2
maxKeys := len(password)
if maxKeys < 64 {
maxKeys = 64
}
for i := 0; i < len(password); i++ {
oldOffset := keyslots[i].Area.Offset
keyslots[i].Area.Offset = int64(keyslotsOffset) + int64(roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots))*int64(i)
j.Keyslots[strconv.Itoa(i)] = keyslots[i]
if keyslots[i].Area.Offset != oldOffset {
goto rebuild
}
}
keyslotsSize := roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots) * maxKeys
if j.Config.KeyslotsSize != keyslotsSize {
j.Config.KeyslotsSize = keyslotsSize
goto rebuild
}
segmentOffsetInt := roundUpToMultiple(int(keyslotsOffset)+j.Config.KeyslotsSize, V2SectorSize)
segmentOffset := strconv.Itoa(segmentOffsetInt)
if segment0.Offset != segmentOffset {
segment0.Offset = segmentOffset
goto rebuild
}
d1 := hasher()
h1.SetChecksum(nil)
d1.Write(h1[:])
d1.Write(encodedJSON)
zeropad := make([]byte, headerPlusPaddedJsonSize-len(h1)-len(encodedJSON))
d1.Write(zeropad)
h1.SetChecksum(d1.Sum(nil))
d2 := hasher()
h2.SetChecksum(nil)
d2.Write(h2[:])
d2.Write(encodedJSON)
d2.Write(zeropad)
h2.SetChecksum(d2.Sum(nil))
head := make([]byte, segmentOffsetInt)
copy(head, h1[:])
copy(head[V2SectorSize:], encodedJSON)
copy(head[h2.HeaderOffset():], h2[:])
copy(head[h2.HeaderOffset()+V2SectorSize:], encodedJSON)
for i := 0; i < len(password); i++ {
iAsString := strconv.Itoa(i)
copy(head[j.Keyslots[iAsString].Area.Offset:], stripes[i])
}
ivTweak := 0
encryptStream := func(plaintext []byte) ([]byte, error) {
ciphertext, err := v2encrypt(cipher, ivTweak, mkey, plaintext, payloadSectorSize, true)
ivTweak += len(plaintext) / payloadSectorSize
return ciphertext, err
}
return head, encryptStream, segment0.SectorSize, nil
}

537
vendor/github.com/containers/luksy/encryption.go generated vendored Normal file
View File

@ -0,0 +1,537 @@
package luksy
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"strings"
"github.com/aead/serpent"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/ripemd160"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xts"
)
func v1encrypt(cipherName, cipherMode string, ivTweak int, key []byte, plaintext []byte, sectorSize int, bulk bool) ([]byte, error) {
var err error
var newBlockCipher func([]byte) (cipher.Block, error)
ciphertext := make([]byte, len(plaintext))
switch cipherName {
case "aes":
newBlockCipher = aes.NewCipher
case "twofish":
newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) }
case "cast5":
newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) }
case "serpent":
newBlockCipher = serpent.NewCipher
default:
return nil, fmt.Errorf("unsupported cipher %s", cipherName)
}
if sectorSize == 0 {
sectorSize = V1SectorSize
}
switch sectorSize {
default:
return nil, fmt.Errorf("invalid sector size %d", sectorSize)
case 512, 1024, 2048, 4096:
}
switch cipherMode {
case "ecb":
cipher, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += cipher.BlockSize() {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
}
case "cbc-plain":
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := processed/sectorSize + ivTweak
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
iv0 := make([]byte, block.BlockSize())
binary.LittleEndian.PutUint32(iv0, uint32(ivValue))
cipher := cipher.NewCBCEncrypter(block, iv0)
cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
}
case "cbc-plain64":
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := processed/sectorSize + ivTweak
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
iv0 := make([]byte, block.BlockSize())
binary.LittleEndian.PutUint64(iv0, uint64(ivValue))
cipher := cipher.NewCBCEncrypter(block, iv0)
cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
}
case "cbc-essiv:sha256":
hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:")
hasher, err := hasherByName(hasherName)
if err != nil {
return nil, fmt.Errorf("initializing encryption using hash %s: %w", hasherName, err)
}
h := hasher()
h.Write(key)
makeiv, err := newBlockCipher(h.Sum(nil))
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := (processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
plain0 := make([]byte, makeiv.BlockSize())
binary.LittleEndian.PutUint64(plain0, uint64(ivValue))
iv0 := make([]byte, makeiv.BlockSize())
makeiv.Encrypt(iv0, plain0)
cipher := cipher.NewCBCEncrypter(block, iv0)
cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft])
}
case "xts-plain":
cipher, err := xts.NewCipher(newBlockCipher, key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
sector := uint64(processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
sector *= uint64(sectorSize / V1SectorSize)
}
sector = sector % 0x100000000
cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector)
}
case "xts-plain64":
cipher, err := xts.NewCipher(newBlockCipher, key)
if err != nil {
return nil, fmt.Errorf("initializing encryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
sector := uint64(processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
sector *= uint64(sectorSize / V1SectorSize)
}
cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector)
}
default:
return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode)
}
if err != nil {
return nil, fmt.Errorf("cipher error: %w", err)
}
return ciphertext, nil
}
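// A minimal sketch (illustrative only) of encrypting one 512-byte sector
// with AES in xts-plain64 mode; the key length must suit the cipher (64
// bytes here, since XTS with AES-256 uses two 32-byte keys):
//
//	key := make([]byte, 64)
//	_, _ = rand.Read(key)
//	sector := make([]byte, 512)
//	ct, err := v1encrypt("aes", "xts-plain64", 0, key, sector, 512, false)
//	_ = err // on success, ct holds the encrypted sector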
func v1decrypt(cipherName, cipherMode string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
var err error
var newBlockCipher func([]byte) (cipher.Block, error)
plaintext := make([]byte, len(ciphertext))
switch cipherName {
case "aes":
newBlockCipher = aes.NewCipher
case "twofish":
newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) }
case "cast5":
newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) }
case "serpent":
newBlockCipher = serpent.NewCipher
default:
return nil, fmt.Errorf("unsupported cipher %s", cipherName)
}
if sectorSize == 0 {
sectorSize = V1SectorSize
}
switch sectorSize {
default:
return nil, fmt.Errorf("invalid sector size %d", sectorSize)
case 512, 1024, 2048, 4096:
}
switch cipherMode {
case "ecb":
cipher, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(ciphertext); processed += cipher.BlockSize() {
blockLeft := sectorSize
if processed+blockLeft > len(ciphertext) {
blockLeft = len(ciphertext) - processed
}
cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
}
case "cbc-plain":
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := processed/sectorSize + ivTweak
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
iv0 := make([]byte, block.BlockSize())
binary.LittleEndian.PutUint32(iv0, uint32(ivValue))
cipher := cipher.NewCBCDecrypter(block, iv0)
cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
}
case "cbc-plain64":
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := processed/sectorSize + ivTweak
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
iv0 := make([]byte, block.BlockSize())
binary.LittleEndian.PutUint64(iv0, uint64(ivValue))
cipher := cipher.NewCBCDecrypter(block, iv0)
cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
}
case "cbc-essiv:sha256":
hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:")
hasher, err := hasherByName(hasherName)
if err != nil {
return nil, fmt.Errorf("initializing decryption using hash %s: %w", hasherName, err)
}
h := hasher()
h.Write(key)
makeiv, err := newBlockCipher(h.Sum(nil))
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
block, err := newBlockCipher(key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(plaintext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(plaintext) {
blockLeft = len(plaintext) - processed
}
ivValue := (processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
ivValue *= sectorSize / V1SectorSize
}
plain0 := make([]byte, makeiv.BlockSize())
binary.LittleEndian.PutUint64(plain0, uint64(ivValue))
iv0 := make([]byte, makeiv.BlockSize())
makeiv.Encrypt(iv0, plain0)
cipher := cipher.NewCBCDecrypter(block, iv0)
cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft])
}
case "xts-plain":
cipher, err := xts.NewCipher(newBlockCipher, key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(ciphertext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(ciphertext) {
blockLeft = len(ciphertext) - processed
}
sector := uint64(processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
sector *= uint64(sectorSize / V1SectorSize)
}
sector = sector % 0x100000000
cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector)
}
case "xts-plain64":
cipher, err := xts.NewCipher(newBlockCipher, key)
if err != nil {
return nil, fmt.Errorf("initializing decryption: %w", err)
}
for processed := 0; processed < len(ciphertext); processed += sectorSize {
blockLeft := sectorSize
if processed+blockLeft > len(ciphertext) {
blockLeft = len(ciphertext) - processed
}
sector := uint64(processed/sectorSize + ivTweak)
if bulk { // iv_large_sectors is not being used
sector *= uint64(sectorSize / V1SectorSize)
}
cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector)
}
default:
return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode)
}
if err != nil {
return nil, fmt.Errorf("cipher error: %w", err)
}
return plaintext, nil
}
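// v2encrypt and v2decrypt accept a LUKS2 cipher-suite string such as
// "aes-xts-plain64", split it at the first "-" into a cipher name ("aes")
// and a mode ("xts-plain64"), and delegate to the v1 routines above.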
func v2encrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
var cipherName, cipherMode string
switch {
default:
cipherSpec := strings.SplitN(cipherSuite, "-", 2)
if len(cipherSpec) < 2 {
return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite)
}
cipherName = cipherSpec[0]
cipherMode = cipherSpec[1]
}
return v1encrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk)
}
func v2decrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) {
var cipherName, cipherMode string
switch {
default:
cipherSpec := strings.SplitN(cipherSuite, "-", 2)
if len(cipherSpec) < 2 {
return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite)
}
cipherName = cipherSpec[0]
cipherMode = cipherSpec[1]
}
return v1decrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk)
}
func diffuse(key []byte, h hash.Hash) []byte {
sum := make([]byte, len(key))
counter := uint32(0)
for summed := 0; summed < len(key); summed += h.Size() {
h.Reset()
var buf [4]byte
binary.BigEndian.PutUint32(buf[:], counter)
h.Write(buf[:])
needed := len(key) - summed
if needed > h.Size() {
needed = h.Size()
}
h.Write(key[summed : summed+needed])
partial := h.Sum(nil)
copy(sum[summed:summed+needed], partial)
counter++
}
return sum
}
func afMerge(splitKey []byte, h hash.Hash, keysize int, stripes int) ([]byte, error) {
if len(splitKey) != keysize*stripes {
return nil, fmt.Errorf("expected %d af bytes, got %d", keysize*stripes, len(splitKey))
}
d := make([]byte, keysize)
for i := 0; i < stripes-1; i++ {
for j := 0; j < keysize; j++ {
d[j] = d[j] ^ splitKey[i*keysize+j]
}
d = diffuse(d, h)
}
for j := 0; j < keysize; j++ {
d[j] = d[j] ^ splitKey[(stripes-1)*keysize+j]
}
return d, nil
}
func afSplit(key []byte, h hash.Hash, stripes int) ([]byte, error) {
keysize := len(key)
s := make([]byte, keysize*stripes)
d := make([]byte, keysize)
n, err := rand.Read(s[0 : keysize*(stripes-1)])
if err != nil {
return nil, err
}
if n != keysize*(stripes-1) {
return nil, fmt.Errorf("short read when attempting to read random data: %d < %d", n, keysize*(stripes-1))
}
for i := 0; i < stripes-1; i++ {
for j := 0; j < keysize; j++ {
d[j] = d[j] ^ s[i*keysize+j]
}
d = diffuse(d, h)
}
for j := 0; j < keysize; j++ {
s[(stripes-1)*keysize+j] = d[j] ^ key[j]
}
return s, nil
}
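// A minimal round-trip sketch (illustrative, not part of this file): a key
// split across stripes with afSplit should be recoverable with afMerge.
//
//	key := make([]byte, 32)
//	if _, err := rand.Read(key); err == nil {
//		split, err := afSplit(key, sha256.New(), V1Stripes)
//		if err == nil {
//			merged, err := afMerge(split, sha256.New(), len(key), V1Stripes)
//			_ = err // expect err == nil && bytes.Equal(merged, key)
//		}
//	}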
func roundUpToMultiple(i, factor int) int {
if i < 0 {
return 0
}
return i + ((factor - (i % factor)) % factor)
}
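// For example, roundUpToMultiple(1000, 512) == 1024 and
// roundUpToMultiple(1024, 512) == 1024; negative values clamp to 0.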
func hasherByName(name string) (func() hash.Hash, error) {
switch name {
case "sha1":
return sha1.New, nil
case "sha256":
return sha256.New, nil
case "sha512":
return sha512.New, nil
case "ripemd160":
return ripemd160.New, nil
default:
return nil, fmt.Errorf("unsupported digest algorithm %q", name)
}
}
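// wrapper buffers data in a blockSize-aligned buffer and runs fn over each
// full buffer: it behaves as an encrypting writer when writer is set, and
// as a decrypting reader when reader is set.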
type wrapper struct {
fn func(plaintext []byte) ([]byte, error)
blockSize int
buf []byte
buffered, consumed int
reader io.Reader
eof bool
writer io.Writer
}
func (w *wrapper) Write(buf []byte) (int, error) {
n := 0
for n < len(buf) {
nBuffered := copy(w.buf[w.buffered:], buf[n:])
w.buffered += nBuffered
n += nBuffered
if w.buffered == len(w.buf) {
processed, err := w.fn(w.buf)
if err != nil {
return n, err
}
nWritten, err := w.writer.Write(processed)
if err != nil {
return n, err
}
w.buffered -= nWritten
if nWritten != len(processed) {
return n, fmt.Errorf("short write: %d != %d", nWritten, len(processed))
}
}
}
return n, nil
}
func (w *wrapper) Read(buf []byte) (int, error) {
n := 0
for n < len(buf) {
nRead := copy(buf[n:], w.buf[w.consumed:])
w.consumed += nRead
n += nRead
if w.consumed == len(w.buf) && !w.eof {
nRead, err := w.reader.Read(w.buf)
w.eof = errors.Is(err, io.EOF)
if err != nil && !w.eof {
return n, err
}
if nRead != len(w.buf) && !w.eof {
return n, fmt.Errorf("short read: %d != %d", nRead, len(w.buf))
}
processed, err := w.fn(w.buf[:nRead])
if err != nil {
return n, err
}
w.buf = processed
w.consumed = 0
}
}
var eof error
if w.consumed == len(w.buf) && w.eof {
eof = io.EOF
}
return n, eof
}
func (w *wrapper) Close() error {
if w.writer != nil {
if w.buffered%w.blockSize != 0 {
w.buffered += copy(w.buf[w.buffered:], make([]byte, roundUpToMultiple(w.buffered, w.blockSize)-w.buffered))
}
processed, err := w.fn(w.buf[:w.buffered])
if err != nil {
return err
}
nWritten, err := w.writer.Write(processed)
if err != nil {
return err
}
if nWritten != len(processed) {
return fmt.Errorf("short write: %d != %d", nWritten, len(processed))
}
w.buffered = 0
}
return nil
}
// EncryptWriter creates an io.WriteCloser which buffers writes through an
// encryption function. After the final write, the returned writer must be
// closed to flush any remaining buffered data.
func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser {
bufferSize := roundUpToMultiple(1024*1024, blockSize)
return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer}
}
// DecryptReader creates an io.ReadCloser which buffers reads through a
// decryption function. When no more data will be read, the returned reader
// should be closed.
func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser {
bufferSize := roundUpToMultiple(1024*1024, blockSize)
return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), consumed: bufferSize, reader: reader}
}
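// A minimal usage sketch (illustrative only; "encryptStream", "src", "dst",
// and "blockSize" stand in for values obtained elsewhere): stream plaintext
// through the encryption function, then close to flush the padded tail.
//
//	w := EncryptWriter(encryptStream, dst, blockSize)
//	if _, err := io.Copy(w, src); err != nil {
//		return err
//	}
//	if err := w.Close(); err != nil {
//		return err
//	}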

vendor/github.com/containers/luksy/luks.go generated vendored Normal file

@ -0,0 +1,75 @@
package luksy
import (
"bytes"
"encoding/json"
"fmt"
"os"
)
// ReadHeaderOptions can control some of what ReadHeaders() does.
type ReadHeaderOptions struct{}
// ReadHeaders reads LUKS headers from the specified file, returning either a
// LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on
// which format is detected.
func ReadHeaders(f *os.File, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) {
var v1 V1Header
var v2a, v2b V2Header
n, err := f.ReadAt(v2a[:], 0)
if err != nil {
return nil, nil, nil, nil, err
}
if n != len(v2a) {
return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n)
}
if n, err = f.ReadAt(v1[:], 0); err != nil {
return nil, nil, nil, nil, err
}
if n != len(v1) {
return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n)
}
if v2a.Magic() != V2Magic1 {
return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in LUKS header (%q)", v2a.Magic())
}
switch v2a.Version() { // is it a v1 header, or the first v2 header?
case 1:
return &v1, nil, nil, nil, nil
case 2:
size := v2a.HeaderSize()
if size > 0x7fffffffffffffff {
return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for second header")
}
if size < 4096 {
return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for JSON data")
}
if n, err = f.ReadAt(v2b[:], int64(size)); err != nil || n != len(v2b) {
if err == nil && n != len(v2b) {
err = fmt.Errorf("short read: read only %d bytes, should have read %d", n, len(v2b))
}
return nil, nil, nil, nil, err
}
if v2b.Magic() != V2Magic2 {
return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in second LUKS header (%q)", v2b.Magic())
}
jsonSize := size - 4096
buf := make([]byte, jsonSize)
n, err = f.ReadAt(buf[:], 4096)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("internal error: while reading JSON data: %w", err)
}
if n < 0 || uint64(n) != jsonSize {
return nil, nil, nil, nil, fmt.Errorf("internal error: short read while reading JSON data (wanted %d, got %d)", jsonSize, n)
}
var jsonData V2JSON
buf = bytes.TrimRightFunc(buf, func(r rune) bool { return r == 0 })
if err = json.Unmarshal(buf, &jsonData); err != nil {
return nil, nil, nil, nil, fmt.Errorf("internal error: decoding JSON data: %w", err)
}
if uint64(jsonData.Config.JsonSize) != jsonSize {
return nil, nil, nil, nil, fmt.Errorf("internal error: JSON data size mismatch: (expected %d, used %d)", jsonData.Config.JsonSize, jsonSize)
}
return nil, &v2a, &v2b, &jsonData, nil
}
return nil, nil, nil, nil, fmt.Errorf("error reading LUKS header - magic identifier not found")
}
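// A minimal usage sketch (illustrative only; the path is hypothetical):
//
//	f, err := os.Open("/path/to/luks.img")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	v1, v2a, v2b, v2json, err := ReadHeaders(f, ReadHeaderOptions{})
//	switch {
//	case err != nil:
//		return err
//	case v1 != nil:
//		// LUKSv1 volume
//	case v2a != nil && v2b != nil && v2json != nil:
//		// LUKSv2 volume
//	}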

vendor/github.com/containers/luksy/tune.go generated vendored Normal file

@ -0,0 +1,55 @@
package luksy
import (
"hash"
"time"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
)
func durationOf(f func()) time.Duration {
start := time.Now()
f()
return time.Since(start)
}
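// IterationsPBKDF2 calibrates a PBKDF2 iteration count for this machine: it
// doubles the count until a single derivation takes at least a tenth of a
// second, then scales linearly toward a cost of roughly one second.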
func IterationsPBKDF2(salt []byte, keyLen int, h func() hash.Hash) int {
iterations := 2
var d time.Duration
for d < time.Second {
d = durationOf(func() {
_ = pbkdf2.Key([]byte{}, salt, iterations, keyLen, h)
})
if d < time.Second/10 {
iterations *= 2
} else {
return iterations * int(time.Second) / int(d)
}
}
return iterations
}
func memoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int, kdf func([]byte, []byte, uint32, uint32, uint8, uint32) []byte) int {
memoryCost := 2
var d time.Duration
for d < time.Second {
d = durationOf(func() {
_ = kdf([]byte{}, salt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(keyLen))
})
if d < time.Second/10 {
memoryCost *= 2
} else {
return memoryCost * int(time.Second) / int(d)
}
}
return memoryCost
}
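// MemoryCostArgon2 calibrates a memory cost using argon2.Key, which
// implements Argon2i in golang.org/x/crypto/argon2.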
func MemoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int) int {
return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.Key)
}
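// MemoryCostArgon2i calibrates a memory cost using argon2.IDKey, which
// implements Argon2id in golang.org/x/crypto/argon2.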
func MemoryCostArgon2i(salt []byte, keyLen, timeCost, threadsCost int) int {
return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.IDKey)
}

vendor/github.com/containers/luksy/v1header.go generated vendored Normal file

@ -0,0 +1,321 @@
package luksy
import (
"encoding/binary"
"fmt"
"syscall"
)
type (
V1Header [592]uint8
V1KeySlot [48]uint8
)
const (
// Mostly verbatim from LUKS1 On-Disk Format Specification version 1.2.3
V1Magic = "LUKS\xba\xbe"
v1MagicStart = 0
v1MagicLength = 6
v1VersionStart = v1MagicStart + v1MagicLength
v1VersionLength = 2
v1CipherNameStart = v1VersionStart + v1VersionLength
v1CipherNameLength = 32
v1CipherModeStart = v1CipherNameStart + v1CipherNameLength
v1CipherModeLength = 32
v1HashSpecStart = v1CipherModeStart + v1CipherModeLength
v1HashSpecLength = 32
v1PayloadOffsetStart = v1HashSpecStart + v1HashSpecLength
v1PayloadOffsetLength = 4
v1KeyBytesStart = v1PayloadOffsetStart + v1PayloadOffsetLength
v1KeyBytesLength = 4
v1MKDigestStart = v1KeyBytesStart + v1KeyBytesLength
v1MKDigestLength = v1DigestSize
v1MKDigestSaltStart = v1MKDigestStart + v1MKDigestLength
v1MKDigestSaltLength = v1SaltSize
v1MKDigestIterStart = v1MKDigestSaltStart + v1MKDigestSaltLength
v1MKDigestIterLength = 4
v1UUIDStart = v1MKDigestIterStart + v1MKDigestIterLength
v1UUIDLength = 40
v1KeySlot1Start = v1UUIDStart + v1UUIDLength
v1KeySlot1Length = 48
v1KeySlot2Start = v1KeySlot1Start + v1KeySlot1Length
v1KeySlot2Length = 48
v1KeySlot3Start = v1KeySlot2Start + v1KeySlot2Length
v1KeySlot3Length = 48
v1KeySlot4Start = v1KeySlot3Start + v1KeySlot3Length
v1KeySlot4Length = 48
v1KeySlot5Start = v1KeySlot4Start + v1KeySlot4Length
v1KeySlot5Length = 48
v1KeySlot6Start = v1KeySlot5Start + v1KeySlot5Length
v1KeySlot6Length = 48
v1KeySlot7Start = v1KeySlot6Start + v1KeySlot6Length
v1KeySlot7Length = 48
v1KeySlot8Start = v1KeySlot7Start + v1KeySlot7Length
v1KeySlot8Length = 48
v1HeaderStructSize = v1KeySlot8Start + v1KeySlot8Length
v1KeySlotActiveStart = 0
v1KeySlotActiveLength = 4
v1KeySlotIterationsStart = v1KeySlotActiveStart + v1KeySlotActiveLength
v1KeySlotIterationsLength = 4
v1KeySlotSaltStart = v1KeySlotIterationsStart + v1KeySlotIterationsLength
v1KeySlotSaltLength = v1SaltSize
v1KeySlotKeyMaterialOffsetStart = v1KeySlotSaltStart + v1KeySlotSaltLength
v1KeySlotKeyMaterialOffsetLength = 4
v1KeySlotStripesStart = v1KeySlotKeyMaterialOffsetStart + v1KeySlotKeyMaterialOffsetLength
v1KeySlotStripesLength = 4
v1KeySlotStructSize = v1KeySlotStripesStart + v1KeySlotStripesLength
v1DigestSize = 20
v1SaltSize = 32
v1NumKeys = 8
v1KeySlotActiveKeyDisabled = 0x0000dead
v1KeySlotActiveKeyEnabled = 0x00ac71f3
V1Stripes = 4000
V1AlignKeyslots = 4096
V1SectorSize = 512
)
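// With these offsets, v1HeaderStructSize works out to 592 bytes, matching
// the [592]uint8 V1Header type, and v1KeySlotStructSize to 48 bytes,
// matching V1KeySlot.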
func (h V1Header) readu2(offset int) uint16 {
return binary.BigEndian.Uint16(h[offset:])
}
func (h V1Header) readu4(offset int) uint32 {
return binary.BigEndian.Uint32(h[offset:])
}
func (h *V1Header) writeu2(offset int, value uint16) {
binary.BigEndian.PutUint16(h[offset:], value)
}
func (h *V1Header) writeu4(offset int, value uint32) {
binary.BigEndian.PutUint32(h[offset:], value)
}
func (h V1Header) Magic() string {
return trimZeroPad(string(h[v1MagicStart : v1MagicStart+v1MagicLength]))
}
func (h *V1Header) SetMagic(magic string) error {
switch magic {
case V1Magic:
copy(h[v1MagicStart:v1MagicStart+v1MagicLength], []uint8(magic))
return nil
}
return fmt.Errorf("magic %q not acceptable, only %q is an acceptable magic value: %w", magic, V1Magic, syscall.EINVAL)
}
func (h V1Header) Version() uint16 {
return h.readu2(v1VersionStart)
}
func (h *V1Header) SetVersion(version uint16) error {
switch version {
case 1:
h.writeu2(v1VersionStart, version)
return nil
}
return fmt.Errorf("version %d not acceptable, only 1 is an acceptable version: %w", version, syscall.EINVAL)
}
func (h *V1Header) setZeroString(offset int, value string, length int) {
for len(value) < length {
value = value + "\000"
}
copy(h[offset:offset+length], []uint8(value))
}
func (h *V1Header) setInt8(offset int, s []uint8, length int) {
t := make([]byte, length)
copy(t, s)
copy(h[offset:offset+length], t)
}
func (h V1Header) CipherName() string {
return trimZeroPad(string(h[v1CipherNameStart : v1CipherNameStart+v1CipherNameLength]))
}
func (h *V1Header) SetCipherName(name string) {
h.setZeroString(v1CipherNameStart, name, v1CipherNameLength)
}
func (h V1Header) CipherMode() string {
return trimZeroPad(string(h[v1CipherModeStart : v1CipherModeStart+v1CipherModeLength]))
}
func (h *V1Header) SetCipherMode(mode string) {
h.setZeroString(v1CipherModeStart, mode, v1CipherModeLength)
}
func (h V1Header) HashSpec() string {
return trimZeroPad(string(h[v1HashSpecStart : v1HashSpecStart+v1HashSpecLength]))
}
func (h *V1Header) SetHashSpec(spec string) {
h.setZeroString(v1HashSpecStart, spec, v1HashSpecLength)
}
func (h V1Header) PayloadOffset() uint32 {
return h.readu4(v1PayloadOffsetStart)
}
func (h *V1Header) SetPayloadOffset(offset uint32) {
h.writeu4(v1PayloadOffsetStart, offset)
}
func (h V1Header) KeyBytes() uint32 {
return h.readu4(v1KeyBytesStart)
}
func (h *V1Header) SetKeyBytes(bytes uint32) {
h.writeu4(v1KeyBytesStart, bytes)
}
func (h *V1Header) KeySlot(slot int) (V1KeySlot, error) {
var ks V1KeySlot
if slot < 0 || slot >= v1NumKeys {
return ks, fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1)
}
switch slot {
case 0:
copy(ks[:], h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length])
case 1:
copy(ks[:], h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length])
case 2:
copy(ks[:], h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length])
case 3:
copy(ks[:], h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length])
case 4:
copy(ks[:], h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length])
case 5:
copy(ks[:], h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length])
case 6:
copy(ks[:], h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length])
case 7:
copy(ks[:], h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length])
}
return ks, nil
}
func (h *V1Header) SetKeySlot(slot int, ks V1KeySlot) error {
if slot < 0 || slot >= v1NumKeys {
return fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1)
}
switch slot {
case 0:
copy(h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length], ks[:])
case 1:
copy(h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length], ks[:])
case 2:
copy(h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length], ks[:])
case 3:
copy(h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length], ks[:])
case 4:
copy(h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length], ks[:])
case 5:
copy(h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length], ks[:])
case 6:
copy(h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length], ks[:])
case 7:
copy(h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length], ks[:])
}
return nil
}
func (h V1Header) MKDigest() []uint8 {
return dupInt8(h[v1MKDigestStart : v1MKDigestStart+v1MKDigestLength])
}
func (h *V1Header) SetMKDigest(digest []uint8) {
h.setInt8(v1MKDigestStart, digest, v1MKDigestLength)
}
func (h V1Header) MKDigestSalt() []uint8 {
return dupInt8(h[v1MKDigestSaltStart : v1MKDigestSaltStart+v1MKDigestSaltLength])
}
func (h *V1Header) SetMKDigestSalt(salt []uint8) {
h.setInt8(v1MKDigestSaltStart, salt, v1MKDigestSaltLength)
}
func (h V1Header) MKDigestIter() uint32 {
return h.readu4(v1MKDigestIterStart)
}
func (h *V1Header) SetMKDigestIter(bytes uint32) {
h.writeu4(v1MKDigestIterStart, bytes)
}
func (h V1Header) UUID() string {
return trimZeroPad(string(h[v1UUIDStart : v1UUIDStart+v1UUIDLength]))
}
func (h *V1Header) SetUUID(uuid string) {
h.setZeroString(v1UUIDStart, uuid, v1UUIDLength)
}
func (s V1KeySlot) readu4(offset int) uint32 {
return binary.BigEndian.Uint32(s[offset:])
}
func (s *V1KeySlot) writeu4(offset int, value uint32) {
binary.BigEndian.PutUint32(s[offset:], value)
}
func (s *V1KeySlot) setInt8(offset int, i []uint8, length int) {
t := make([]byte, length)
copy(t, i)
copy(s[offset:offset+length], t)
}
func (s V1KeySlot) Active() (bool, error) {
active := s.readu4(v1KeySlotActiveStart)
switch active {
case v1KeySlotActiveKeyDisabled:
return false, nil
case v1KeySlotActiveKeyEnabled:
return true, nil
}
return false, fmt.Errorf("got invalid active value %#0x: %w", active, syscall.EINVAL)
}
func (s *V1KeySlot) SetActive(active bool) {
if active {
s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyEnabled)
return
}
s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyDisabled)
}
func (s V1KeySlot) Iterations() uint32 {
return s.readu4(v1KeySlotIterationsStart)
}
func (s *V1KeySlot) SetIterations(iterations uint32) {
s.writeu4(v1KeySlotIterationsStart, iterations)
}
func (s V1KeySlot) KeySlotSalt() []uint8 {
return dupInt8(s[v1KeySlotSaltStart : v1KeySlotSaltStart+v1KeySlotSaltLength])
}
func (s *V1KeySlot) SetKeySlotSalt(salt []uint8) {
s.setInt8(v1KeySlotSaltStart, salt, v1KeySlotSaltLength)
}
func (s V1KeySlot) KeyMaterialOffset() uint32 {
return s.readu4(v1KeySlotKeyMaterialOffsetStart)
}
func (s *V1KeySlot) SetKeyMaterialOffset(material uint32) {
s.writeu4(v1KeySlotKeyMaterialOffsetStart, material)
}
func (s V1KeySlot) Stripes() uint32 {
return s.readu4(v1KeySlotStripesStart)
}
func (s *V1KeySlot) SetStripes(stripes uint32) {
s.writeu4(v1KeySlotStripesStart, stripes)
}

vendor/github.com/containers/luksy/v2header.go generated vendored Normal file

@ -0,0 +1,203 @@
package luksy
import (
"fmt"
"strings"
"syscall"
)
type V2Header [4096]uint8
const (
// Mostly verbatim from LUKS2 On-Disk Format Specification version 1.1.1
V2Magic1 = V1Magic
V2Magic2 = "SKUL\xba\xbe"
v2MagicStart = 0
v2MagicLength = 6
v2VersionStart = v2MagicStart + v2MagicLength
v2VersionLength = 2
v2HeaderSizeStart = v2VersionStart + v2VersionLength
v2HeaderSizeLength = 8
v2SequenceIDStart = v2HeaderSizeStart + v2HeaderSizeLength
v2SequenceIDLength = 8
v2LabelStart = v2SequenceIDStart + v2SequenceIDLength
v2LabelLength = 48
v2ChecksumAlgorithmStart = v2LabelStart + v2LabelLength
v2ChecksumAlgorithmLength = 32
v2SaltStart = v2ChecksumAlgorithmStart + v2ChecksumAlgorithmLength
v2SaltLength = 64
v2UUIDStart = v2SaltStart + v2SaltLength
v2UUIDLength = 40
v2SubsystemStart = v2UUIDStart + v2UUIDLength
v2SubsystemLength = v2LabelLength
v2HeaderOffsetStart = v2SubsystemStart + v2SubsystemLength
v2HeaderOffsetLength = 8
v2Padding1Start = v2HeaderOffsetStart + v2HeaderOffsetLength
v2Padding1Length = 184
v2ChecksumStart = v2Padding1Start + v2Padding1Length
v2ChecksumLength = 64
v2Padding4096Start = v2ChecksumStart + v2ChecksumLength
v2Padding4096Length = 7 * 512
v2HeaderStructSize = v2Padding4096Start + v2Padding4096Length
V2Stripes = 4000
V2AlignKeyslots = 4096
V2SectorSize = 4096
)
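// With these offsets, v2HeaderStructSize works out to 4096 bytes, matching
// the [4096]uint8 V2Header type.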
func (h V2Header) Magic() string {
return string(h[v2MagicStart : v2MagicStart+v2MagicLength])
}
func (h *V2Header) SetMagic(magic string) error {
switch magic {
case V2Magic1, V2Magic2:
copy(h[v2MagicStart:v2MagicStart+v2MagicLength], []uint8(magic))
return nil
}
return fmt.Errorf("magic %q not acceptable, only %q and %q are acceptable magic values: %w", magic, V2Magic1, V2Magic2, syscall.EINVAL)
}
func (h V2Header) readu2(offset int) uint16 {
t := uint16(0)
for i := 0; i < 2; i++ {
t = (t << 8) + uint16(h[offset+i])
}
return t
}
func (h V2Header) readu8(offset int) uint64 {
t := uint64(0)
for i := 0; i < 8; i++ {
t = (t << 8) + uint64(h[offset+i])
}
return t
}
func (h *V2Header) writeu2(offset int, value uint16) {
t := value
for i := 0; i < 2; i++ {
h[offset+1-i] = uint8(uint64(t) & 0xff)
t >>= 8
}
}
func (h *V2Header) writeu8(offset int, value uint64) {
t := value
for i := 0; i < 8; i++ {
h[offset+7-i] = uint8(uint64(t) & 0xff)
t >>= 8
}
}
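// The readu*/writeu* helpers implement big-endian encoding by hand; for
// example, after h.writeu8(off, 0x0102030405060708), h[off] == 0x01 and
// h.readu8(off) returns the original value.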
func (h V2Header) Version() uint16 {
return h.readu2(v2VersionStart)
}
func (h *V2Header) SetVersion(version uint16) error {
switch version {
case 2:
h.writeu2(v2VersionStart, version)
return nil
}
return fmt.Errorf("version %d not acceptable, only 2 is an acceptable version: %w", version, syscall.EINVAL)
}
func (h V2Header) HeaderSize() uint64 {
return h.readu8(v2HeaderSizeStart)
}
func (h *V2Header) SetHeaderSize(size uint64) {
h.writeu8(v2HeaderSizeStart, size)
}
func (h V2Header) SequenceID() uint64 {
return h.readu8(v2SequenceIDStart)
}
func (h *V2Header) SetSequenceID(id uint64) {
h.writeu8(v2SequenceIDStart, id)
}
func trimZeroPad(s string) string {
return strings.TrimRightFunc(s, func(r rune) bool { return r == 0 })
}
func (h V2Header) Label() string {
return trimZeroPad(string(h[v2LabelStart : v2LabelStart+v2LabelLength]))
}
func (h *V2Header) setZeroString(offset int, value string, length int) {
for len(value) < length {
value = value + "\000"
}
copy(h[offset:offset+length], []uint8(value))
}
func (h *V2Header) SetLabel(label string) {
h.setZeroString(v2LabelStart, label, v2LabelLength)
}
func (h V2Header) ChecksumAlgorithm() string {
return trimZeroPad(string(h[v2ChecksumAlgorithmStart : v2ChecksumAlgorithmStart+v2ChecksumAlgorithmLength]))
}
func (h *V2Header) SetChecksumAlgorithm(alg string) {
h.setZeroString(v2ChecksumAlgorithmStart, alg, v2ChecksumAlgorithmLength)
}
func dupInt8(s []uint8) []uint8 {
c := make([]uint8, len(s))
copy(c, s)
return c
}
func (h *V2Header) setInt8(offset int, s []uint8, length int) {
t := make([]byte, length)
copy(t, s)
copy(h[offset:offset+length], t)
}
func (h V2Header) Salt() []uint8 {
return dupInt8(h[v2SaltStart : v2SaltStart+v2SaltLength])
}
func (h *V2Header) SetSalt(salt []uint8) {
h.setInt8(v2SaltStart, salt, v2SaltLength)
}
func (h V2Header) UUID() string {
return trimZeroPad(string(h[v2UUIDStart : v2UUIDStart+v2UUIDLength]))
}
func (h *V2Header) SetUUID(uuid string) {
h.setZeroString(v2UUIDStart, uuid, v2UUIDLength)
}
func (h V2Header) Subsystem() string {
return trimZeroPad(string(h[v2SubsystemStart : v2SubsystemStart+v2SubsystemLength]))
}
func (h *V2Header) SetSubsystem(ss string) {
h.setZeroString(v2SubsystemStart, ss, v2SubsystemLength)
}
func (h V2Header) HeaderOffset() uint64 {
return h.readu8(v2HeaderOffsetStart)
}
func (h *V2Header) SetHeaderOffset(o uint64) {
h.writeu8(v2HeaderOffsetStart, o)
}
func (h V2Header) Checksum() []uint8 {
hasher, err := hasherByName(h.ChecksumAlgorithm())
if err == nil {
return dupInt8(h[v2ChecksumStart : v2ChecksumStart+hasher().Size()])
}
return dupInt8(h[v2ChecksumStart : v2ChecksumStart+v2ChecksumLength])
}
func (h *V2Header) SetChecksum(sum []uint8) {
h.setInt8(v2ChecksumStart, sum, v2ChecksumLength)
}

vendor/github.com/containers/luksy/v2json.go generated vendored Normal file

@ -0,0 +1,157 @@
package luksy
type V2JSON struct {
Config V2JSONConfig `json:"config"`
Keyslots map[string]V2JSONKeyslot `json:"keyslots"`
Digests map[string]V2JSONDigest `json:"digests"`
Segments map[string]V2JSONSegment `json:"segments"`
Tokens map[string]V2JSONToken `json:"tokens"`
}
type V2JSONKeyslotPriority int
func (p V2JSONKeyslotPriority) String() string {
switch p {
case V2JSONKeyslotPriorityIgnore:
return "ignore"
case V2JSONKeyslotPriorityNormal:
return "normal"
case V2JSONKeyslotPriorityHigh:
return "high"
}
return "unknown"
}
const (
V2JSONKeyslotPriorityIgnore = V2JSONKeyslotPriority(0)
V2JSONKeyslotPriorityNormal = V2JSONKeyslotPriority(1)
V2JSONKeyslotPriorityHigh = V2JSONKeyslotPriority(2)
)
type V2JSONKeyslot struct {
Type string `json:"type"`
KeySize int `json:"key_size"`
Area V2JSONArea `json:"area"`
Priority *V2JSONKeyslotPriority `json:"priority,omitempty"`
*V2JSONKeyslotLUKS2 // type = "luks2"
*V2JSONKeyslotReencrypt // type = "reencrypt"
}
type V2JSONKeyslotLUKS2 struct {
AF V2JSONAF `json:"af"`
Kdf V2JSONKdf `json:"kdf"`
}
type V2JSONKeyslotReencrypt struct {
Mode string `json:"mode"` // only "reencrypt", "encrypt", "decrypt"
Direction string `json:"direction"` // only "forward", "backward"
}
type V2JSONArea struct {
Type string `json:"type"` // only "raw", "none", "journal", "checksum", "datashift", "datashift-journal", "datashift-checksum"
Offset int64 `json:"offset,string"`
Size int64 `json:"size,string"`
*V2JSONAreaRaw // type = "raw"
*V2JSONAreaChecksum // type = "checksum"
*V2JSONAreaDatashift // type = "datashift"
*V2JSONAreaDatashiftChecksum // type = "datashift-checksum"
}
type V2JSONAreaRaw struct {
Encryption string `json:"encryption"`
KeySize int `json:"key_size"`
}
type V2JSONAreaChecksum struct {
Hash string `json:"hash"`
SectorSize int `json:"sector_size"`
}
type V2JSONAreaDatashift struct {
ShiftSize int `json:"shift_size,string"`
}
type V2JSONAreaDatashiftChecksum struct {
V2JSONAreaChecksum
V2JSONAreaDatashift
}
type V2JSONAF struct {
Type string `json:"type"` // "luks1"
*V2JSONAFLUKS1 // type == "luks1"
}
type V2JSONAFLUKS1 struct {
Stripes int `json:"stripes"` // 4000
Hash string `json:"hash"` // "sha256"
}
type V2JSONKdf struct {
Type string `json:"type"`
Salt []byte `json:"salt"`
*V2JSONKdfPbkdf2 // type = "pbkdf2"
*V2JSONKdfArgon2i // type = "argon2i" or type = "argon2id"
}
type V2JSONKdfPbkdf2 struct {
Hash string `json:"hash"`
Iterations int `json:"iterations"`
}
type V2JSONKdfArgon2i struct {
Time int `json:"time"`
Memory int `json:"memory"`
CPUs int `json:"cpus"`
}
type V2JSONSegment struct {
Type string `json:"type"` // only "linear", "crypt"
Offset string `json:"offset"`
Size string `json:"size"` // numeric value or "dynamic"
Flags []string `json:"flags,omitempty"`
*V2JSONSegmentCrypt `json:",omitempty"` // type = "crypt"
}
type V2JSONSegmentCrypt struct {
IVTweak int `json:"iv_tweak,string"`
Encryption string `json:"encryption"`
SectorSize int `json:"sector_size"` // 512 or 1024 or 2048 or 4096
Integrity *V2JSONSegmentIntegrity `json:"integrity,omitempty"`
}
type V2JSONSegmentIntegrity struct {
Type string `json:"type"`
JournalEncryption string `json:"journal_encryption"`
JournalIntegrity string `json:"journal_integrity"`
}
type V2JSONDigest struct {
Type string `json:"type"`
Keyslots []string `json:"keyslots"`
Segments []string `json:"segments"`
Salt []byte `json:"salt"`
Digest []byte `json:"digest"`
*V2JSONDigestPbkdf2 // type == "pbkdf2"
}
type V2JSONDigestPbkdf2 struct {
Hash string `json:"hash"`
Iterations int `json:"iterations"`
}
type V2JSONConfig struct {
JsonSize int `json:"json_size,string"`
KeyslotsSize int `json:"keyslots_size,string,omitempty"`
Flags []string `json:"flags,omitempty"` // one or more of "allow-discards", "same-cpu-crypt", "submit-from-crypt-cpus", "no-journal", "no-read-workqueue", "no-write-workqueue"
Requirements []string `json:"requirements,omitempty"`
}
type V2JSONToken struct {
Type string `json:"type"` // "luks2-keyring"
Keyslots []string `json:"keyslots,omitempty"`
*V2JSONTokenLUKS2Keyring // type == "luks2-keyring"
}
type V2JSONTokenLUKS2Keyring struct {
KeyDescription string `json:"key_description"`
}
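// A rough sketch (illustrative only) of how a "luks2"-type keyslot
// marshals: the embedded *V2JSONKeyslotLUKS2 fields are inlined when the
// pointer is non-nil, and area offsets/sizes serialize as strings, e.g.:
//
//	{
//	  "type": "luks2",
//	  "key_size": 64,
//	  "area": {
//	    "type": "raw",
//	    "offset": "32768",
//	    "size": "131072",
//	    "encryption": "aes-xts-plain64",
//	    "key_size": 64
//	  },
//	  "af": {"type": "luks1", "stripes": 4000, "hash": "sha256"},
//	  "kdf": {"type": "argon2id", "salt": "...", "time": 4, "memory": 1048576, "cpus": 2}
//	}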

vendor/github.com/containers/storage/OWNERS generated vendored Normal file

@ -0,0 +1,32 @@
approvers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg
reviewers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg


@ -1 +1 @@
1.50.1-dev
1.50.2