Update vendor containers/(common, buildah, image, storage)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
Daniel J Walsh
2023-07-14 07:19:56 -04:00
parent bb72016f58
commit 13a9500166
118 changed files with 5804 additions and 1146 deletions

View File

@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.31.0"
Version = "1.32.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"

View File

@ -246,43 +246,25 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.
### Ubuntu
### Ubuntu/Debian
In Ubuntu jammy you can use these commands:
In Ubuntu 22.10 (Kinetic) or Debian 12 (Bookworm) you can use these commands:
```
sudo apt-get -y -qq update
sudo apt-get -y install bats btrfs-progs git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
sudo apt-get -y install golang-1.18
sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
```
Then to install Buildah on Ubuntu follow the steps in this example:
Then to install Buildah follow the steps in this example:
```
mkdir ~/buildah
cd ~/buildah
export GOPATH=`pwd`
git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
cd ./src/github.com/containers/buildah
PATH=/usr/lib/go-1.18/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
git clone https://github.com/containers/buildah
cd buildah
make runc all SECURITYTAGS="apparmor seccomp"
sudo make install install.runc
buildah --help
```
### Debian
To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
```
gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
sudo gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
sudo apt update
sudo apt -y install -t stretch-backports golang
sudo apt -y install bats btrfs-tools git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
```
The build steps on Debian are otherwise the same as Ubuntu, above.
## Vendoring - Dependency Management
This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.

View File

@ -6,7 +6,6 @@ import (
"os/exec"
"path/filepath"
"strings"
"syscall"
"errors"
@ -146,74 +145,6 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error
return nil
}
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
	mergeDir := filepath.Join(contentDir, "merge")
	// Create overlay mount options for rw/ro.
	var overlayOptions string
	if opts.ReadOnly {
		// Read-only overlay mounts require two lower layer.
		lowerTwo := filepath.Join(contentDir, "lower")
		if err := os.Mkdir(lowerTwo, 0755); err != nil {
			return mount, err
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
	} else {
		// Read-write overlay mounts want a lower, upper and a work layer.
		workDir := filepath.Join(contentDir, "work")
		upperDir := filepath.Join(contentDir, "upper")
		// Honor caller-supplied work/upper dirs only when BOTH fragments
		// are set; otherwise use the defaults under contentDir.
		if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
			workDir = opts.WorkDirOptionFragment
			upperDir = opts.UpperDirOptionFragment
		}
		// Mirror the source directory's mode and ownership onto the upper
		// dir so the merged view presents the same permissions.
		st, err := os.Stat(source)
		if err != nil {
			return mount, err
		}
		if err := os.Chmod(upperDir, st.Mode()); err != nil {
			return mount, err
		}
		if stat, ok := st.Sys().(*syscall.Stat_t); ok {
			if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
				return mount, err
			}
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
	}
	// When a mount_program is configured in the storage graph options,
	// mount via that program now and hand the caller a bind mount of the
	// already-assembled merge directory instead of an overlay spec.
	mountProgram := findMountProgram(opts.GraphOpts)
	if mountProgram != "" {
		if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
			return mount, err
		}
		mount.Source = mergeDir
		mount.Destination = dest
		mount.Type = "bind"
		mount.Options = []string{"bind", "slave"}
		return mount, nil
	}
	if unshare.IsRootless() {
		/* If a mount_program is not specified, fallback to try mounting native overlay. */
		overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
	}
	// No mount program: return a kernel overlay mount spec to the caller.
	mount.Source = mergeDir
	mount.Destination = dest
	mount.Type = "overlay"
	mount.Options = strings.Split(overlayOptions, ",")
	return mount, nil
}
// Convert ":" to "\:", the path which will be overlay mounted need to be escaped
func escapeColon(source string) string {
return strings.ReplaceAll(source, ":", "\\:")

View File

@ -0,0 +1,31 @@
package overlay
import (
//"fmt"
//"os"
//"path/filepath"
//"strings"
//"syscall"
"errors"
//"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
)
// MountWithOptions returns a mount spec that exposes the source directory at
// dest according to opts. On FreeBSD only read-only mounts are supported and
// they are emulated with a nullfs mount of source, so contentDir and the
// overlay-specific option fragments (workdir/upperdir) are ignored.
// Read/write requests return an error.
// This API is being used by podman at the moment.
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
	// Guard clause: read/write overlays have no FreeBSD equivalent here.
	if !opts.ReadOnly {
		return mount, errors.New("read/write overlay mounts not supported on freebsd")
	}
	// Read-only overlay mounts can be simulated with nullfs.
	mount.Source = source
	mount.Destination = dest
	mount.Type = "nullfs"
	mount.Options = []string{"ro"}
	return mount, nil
}

View File

@ -0,0 +1,80 @@
package overlay
import (
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
)
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
	mergeDir := filepath.Join(contentDir, "merge")
	// Create overlay mount options for rw/ro.
	var overlayOptions string
	if opts.ReadOnly {
		// Read-only overlay mounts require two lower layer.
		lowerTwo := filepath.Join(contentDir, "lower")
		if err := os.Mkdir(lowerTwo, 0755); err != nil {
			return mount, err
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
	} else {
		// Read-write overlay mounts want a lower, upper and a work layer.
		workDir := filepath.Join(contentDir, "work")
		upperDir := filepath.Join(contentDir, "upper")
		// Honor caller-supplied work/upper dirs only when BOTH fragments
		// are set; otherwise use the defaults under contentDir.
		if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
			workDir = opts.WorkDirOptionFragment
			upperDir = opts.UpperDirOptionFragment
		}
		// Mirror the source directory's mode and ownership onto the upper
		// dir so the merged view presents the same permissions.
		st, err := os.Stat(source)
		if err != nil {
			return mount, err
		}
		if err := os.Chmod(upperDir, st.Mode()); err != nil {
			return mount, err
		}
		if stat, ok := st.Sys().(*syscall.Stat_t); ok {
			if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
				return mount, err
			}
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
	}
	// When a mount_program is configured in the storage graph options,
	// mount via that program now and hand the caller a bind mount of the
	// already-assembled merge directory instead of an overlay spec.
	mountProgram := findMountProgram(opts.GraphOpts)
	if mountProgram != "" {
		if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
			return mount, err
		}
		mount.Source = mergeDir
		mount.Destination = dest
		mount.Type = "bind"
		mount.Options = []string{"bind", "slave"}
		return mount, nil
	}
	if unshare.IsRootless() {
		/* If a mount_program is not specified, fallback to try mounting native overlay. */
		overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
	}
	// No mount program: return a kernel overlay mount spec to the caller.
	mount.Source = mergeDir
	mount.Destination = dest
	mount.Type = "overlay"
	mount.Options = strings.Split(overlayOptions, ",")
	return mount, nil
}

View File

@ -17,6 +17,7 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/jail"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/resolvconf"
@ -322,13 +323,22 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
var foundrw, foundro bool
var foundrw, foundro, foundO bool
var upperDir string
for _, opt := range options {
switch opt {
case "rw":
foundrw = true
case "ro":
foundro = true
case "O":
foundO = true
}
if strings.HasPrefix(opt, "upperdir") {
splitOpt := strings.SplitN(opt, "=", 2)
if len(splitOpt) > 1 {
upperDir = splitOpt[1]
}
}
}
if !foundrw && !foundro {
@ -337,6 +347,30 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
if mountType == "bind" || mountType == "rbind" {
mountType = "nullfs"
}
if foundO {
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return specs.Mount{}, err
}
contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
if err != nil {
return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
}
overlayOpts := overlay.Options{
RootUID: idMaps.rootUID,
RootGID: idMaps.rootGID,
UpperDirOptionFragment: upperDir,
GraphOpts: b.store.GraphOptions(),
}
overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
if err == nil {
b.TempVolumes[contentDir] = true
}
return overlayMount, err
}
return specs.Mount{
Destination: container,
Type: mountType,

View File

@ -394,10 +394,12 @@ func filterID(value string) filterFunc {
}
}
// filterDigest creates an digest filter for matching the specified value.
// filterDigest creates a digest filter for matching the specified value.
func filterDigest(value string) filterFunc {
// TODO: return an error if value is not a digest
// if _, err := digest.Parse(value); err != nil {...}
return func(img *Image) (bool, error) {
return string(img.Digest()) == value, nil
return img.hasDigest(value), nil
}
}

View File

@ -144,6 +144,9 @@ func (i *Image) ID() string {
// possibly many digests that we have stored for the image, so many
// applications are better off using the entire list returned by Digests().
func (i *Image) Digest() digest.Digest {
// TODO: we return the image digest or the one of the manifest list
// which can lead to issues depending on the callers' assumptions.
// Hence, deprecate in favor of Digest_s_.
return i.storageImage.Digest
}
@ -154,6 +157,18 @@ func (i *Image) Digests() []digest.Digest {
return i.storageImage.Digests
}
// hasDigest reports whether value matches one of the digests recorded for
// the image.
func (i *Image) hasDigest(value string) bool {
	// TODO: change the argument to a typed digest.Digest
	digests := i.Digests()
	for idx := range digests {
		if value == string(digests[idx]) {
			return true
		}
	}
	return false
}
// IsReadOnly returns whether the image is set read only.
func (i *Image) IsReadOnly() bool {
return i.storageImage.ReadOnly
@ -656,6 +671,8 @@ func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
// NamedRepoTags returns the repotags associated with the image as a
// slice of reference.Named.
func (i *Image) NamedRepoTags() ([]reference.Named, error) {
// FIXME: the NamedRepoTags name is a bit misleading as it can return
// repo@digest values if that's how an image was pulled.
var repoTags []reference.Named
for _, name := range i.Names() {
parsed, err := reference.Parse(name)
@ -669,32 +686,37 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) {
return repoTags, nil
}
// inRepoTags looks for the specified name/tag pair in the image's repo tags.
func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) {
// inRepoTags looks for the specified name/tag in the image's repo tags. If
// `ignoreTag` is set, only the repo must match and the tag is ignored.
func (i *Image) inRepoTags(namedTagged reference.NamedTagged, ignoreTag bool) (reference.Named, error) {
repoTags, err := i.NamedRepoTags()
if err != nil {
return nil, err
}
pairs, err := ToNameTagPairs(repoTags)
if err != nil {
return nil, err
}
name := namedTagged.Name()
tag := namedTagged.Tag()
for _, pair := range pairs {
if tag != pair.Tag {
for _, r := range repoTags {
if !ignoreTag {
var repoTag string
tagged, isTagged := r.(reference.NamedTagged)
if isTagged {
repoTag = tagged.Tag()
}
if !isTagged || tag != repoTag {
continue
}
}
repoName := r.Name()
if !strings.HasSuffix(repoName, name) {
continue
}
if !strings.HasSuffix(pair.Name, name) {
continue
if len(repoName) == len(name) { // full match
return r, nil
}
if len(pair.Name) == len(name) { // full match
return pair.named, nil
}
if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo
return pair.named, nil
if repoName[len(repoName)-len(name)-1] == '/' { // matches at repo
return r, nil
}
}

View File

@ -217,6 +217,11 @@ func (i *Image) getManifestList() (manifests.List, error) {
// image index (OCI). This information may be critical to make certain
// execution paths more robust (e.g., suppress certain errors).
func (i *Image) IsManifestList(ctx context.Context) (bool, error) {
// FIXME: due to `ImageDigestBigDataKey` we'll always check the
// _last-written_ manifest which is causing issues for multi-arch image
// pulls.
//
// See https://github.com/containers/common/pull/1505#discussion_r1242677279.
ref, err := i.StorageReference()
if err != nil {
return false, err

View File

@ -100,22 +100,22 @@ func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
// normalizeTaggedDigestedString strips the tag off the specified string iff it
// is tagged and digested. Note that the tag is entirely ignored to match
// Docker behavior.
func normalizeTaggedDigestedString(s string) (string, error) {
func normalizeTaggedDigestedString(s string) (string, reference.Named, error) {
// Note that the input string is not expected to be parseable, so we
// return it verbatim in error cases.
ref, err := reference.Parse(s)
if err != nil {
return "", err
return "", nil, err
}
named, ok := ref.(reference.Named)
if !ok {
return s, nil
return s, nil, nil
}
named, err = normalizeTaggedDigestedNamed(named)
if err != nil {
return "", err
return "", nil, err
}
return named.String(), nil
return named.String(), named, nil
}
// normalizeTaggedDigestedNamed strips the tag off the specified named

View File

@ -86,7 +86,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
// Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth.
normalizedName, normalizeError := normalizeTaggedDigestedString(name)
normalizedName, _, normalizeError := normalizeTaggedDigestedString(name)
if normalizeError != nil {
return nil, normalizeError
}

View File

@ -16,6 +16,7 @@ import (
"github.com/containers/storage"
deepcopy "github.com/jinzhu/copier"
jsoniter "github.com/json-iterator/go"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -239,7 +240,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth.
normalizedName, err := normalizeTaggedDigestedString(name)
normalizedName, possiblyUnqualifiedNamedReference, err := normalizeTaggedDigestedString(name)
if err != nil {
return nil, "", err
}
@ -259,7 +260,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// If the name clearly refers to a local image, try to look it up.
if byFullID || byDigest {
img, err := r.lookupImageInLocalStorage(originalName, name, options)
img, err := r.lookupImageInLocalStorage(originalName, name, nil, options)
if err != nil {
return nil, "", err
}
@ -297,7 +298,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
}
for _, candidate := range candidates {
img, err := r.lookupImageInLocalStorage(name, candidate.String(), options)
img, err := r.lookupImageInLocalStorage(name, candidate.String(), candidate, options)
if err != nil {
return nil, "", err
}
@ -308,7 +309,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// The specified name may refer to a short ID. Note that this *must*
// happen after the short-name expansion as done above.
img, err := r.lookupImageInLocalStorage(name, name, options)
img, err := r.lookupImageInLocalStorage(name, name, nil, options)
if err != nil {
return nil, "", err
}
@ -316,21 +317,51 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
return img, name, err
}
return r.lookupImageInDigestsAndRepoTags(name, options)
return r.lookupImageInDigestsAndRepoTags(name, possiblyUnqualifiedNamedReference, options)
}
// lookupImageInLocalStorage looks up the specified candidate for name in the
// storage and checks whether it's matching the system context.
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) {
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandidate reference.Named, options *LookupImageOptions) (*Image, error) {
logrus.Debugf("Trying %q ...", candidate)
img, err := r.store.Image(candidate)
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
return nil, err
var err error
var img *storage.Image
var ref types.ImageReference
// FIXME: the lookup logic for manifest lists needs improvement.
// See https://github.com/containers/common/pull/1505#discussion_r1242677279
// for details.
// For images pulled by tag, Image.Names does not currently contain a
// repo@digest value, so such an input would not match directly in
// c/storage.
if namedCandidate != nil {
namedCandidate = reference.TagNameOnly(namedCandidate)
ref, err = storageTransport.Transport.NewStoreReference(r.store, namedCandidate, "")
if err != nil {
return nil, err
}
img, err = storageTransport.Transport.GetStoreImage(r.store, ref)
if err != nil {
if errors.Is(err, storage.ErrImageUnknown) {
return nil, nil
}
return nil, err
}
// NOTE: we must reparse the reference another time below since
// an ordinary image may have resolved into a per-platform image
// without any regard to options.{Architecture,OS,Variant}.
} else {
img, err = r.store.Image(candidate)
if err != nil {
if errors.Is(err, storage.ErrImageUnknown) {
return nil, nil
}
return nil, err
}
}
if img == nil {
return nil, nil
}
ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID)
ref, err = storageTransport.Transport.ParseStoreReference(r.store, img.ID)
if err != nil {
return nil, err
}
@ -417,76 +448,71 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
// lookupImageInDigestsAndRepoTags attempts to match name against any image in
// the local containers storage. If name is digested, it will be compared
// against image digests. Otherwise, it will be looked up in the repo tags.
func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) {
// Until now, we've tried very hard to find an image but now it is time
// for limbo. If the image includes a digest that we couldn't detect
// verbatim in the storage, we must have a look at all digests of all
// images. Those may change over time (e.g., via manifest lists).
// Both Podman and Buildah want us to do that dance.
func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifiedNamedReference reference.Named, options *LookupImageOptions) (*Image, string, error) {
originalName := name // we may change name below
if possiblyUnqualifiedNamedReference == nil {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// In case of a digested reference, we strip off the digest and require
// any image matching the repo/tag to also match the specified digest.
var requiredDigest digest.Digest
digested, isDigested := possiblyUnqualifiedNamedReference.(reference.Digested)
if isDigested {
requiredDigest = digested.Digest()
possiblyUnqualifiedNamedReference = reference.TrimNamed(possiblyUnqualifiedNamedReference)
name = possiblyUnqualifiedNamedReference.String()
}
if !shortnames.IsShortName(name) {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// Docker compat: make sure to add the "latest" tag if needed. The tag
// will be ignored if we're looking for a digest match.
possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference)
namedTagged, isNamedTagged := possiblyUnqualifiedNamedReference.(reference.NamedTagged)
if !isNamedTagged {
// NOTE: this should never happen since we already stripped off
// the digest.
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown)
}
allImages, err := r.ListImages(context.Background(), nil, nil)
if err != nil {
return nil, "", err
}
ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed
if err != nil {
return nil, "", err
}
named, isNamed := ref.(reference.Named)
if !isNamed {
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
digested, isDigested := named.(reference.Digested)
if isDigested {
logrus.Debug("Looking for image with matching recorded digests")
digest := digested.Digest()
for _, image := range allImages {
for _, d := range image.Digests() {
if d != digest {
continue
}
// Also make sure that the matching image fits all criteria (e.g., manifest list).
if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil {
return nil, "", err
}
return image, name, nil
}
}
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
if !shortnames.IsShortName(name) {
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
namedTagged, isNammedTagged := named.(reference.NamedTagged)
if !isNammedTagged {
// NOTE: this should never happen since we already know it's
// not a digested reference.
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown)
}
for _, image := range allImages {
named, err := image.inRepoTags(namedTagged)
named, err := image.inRepoTags(namedTagged, isDigested)
if err != nil {
return nil, "", err
}
if named == nil {
continue
}
img, err := r.lookupImageInLocalStorage(name, named.String(), options)
img, err := r.lookupImageInLocalStorage(name, named.String(), named, options)
if err != nil {
return nil, "", err
}
if img != nil {
return img, named.String(), err
if isDigested {
if !img.hasDigest(requiredDigest.String()) {
continue
}
named = reference.TrimNamed(named)
canonical, err := reference.WithDigest(named, requiredDigest)
if err != nil {
return nil, "", fmt.Errorf("building canonical reference with digest %q and matched %q: %w", requiredDigest.String(), named.String(), err)
}
return img, canonical.String(), nil
}
return img, named.String(), nil
}
}
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// ResolveName resolves the specified name. If the name resolves to a local

View File

@ -28,9 +28,7 @@ func GetBridgeInterfaceNames(n NetUtil) []string {
func GetUsedNetworkNames(n NetUtil) []string {
names := make([]string, 0, n.Len())
n.ForEach(func(net types.Network) {
if net.Driver == types.BridgeNetworkDriver {
names = append(names, net.NetworkInterface)
}
names = append(names, net.Name)
})
return names
}

View File

@ -5,7 +5,6 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/common/pkg/secrets/filedriver"
@ -50,8 +49,8 @@ var errDataSize = errors.New("secret data must be larger than 0 and less than 51
var secretsFile = "secrets.json"
// secretNameRegexp matches valid secret names
// Allowed: 253 [a-zA-Z0-9-_.] characters, and the start and end character must be [a-zA-Z0-9]
var secretNameRegexp = regexp.Delayed(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)
// Allowed: 253 characters, excluding ,/=\0
var secretNameRegexp = regexp.Delayed("^[^,/=\000]+$")
// SecretsManager holds information on handling secrets
//
@ -247,11 +246,6 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti
// Delete removes all secret metadata and secret data associated with the specified secret.
// Delete takes a name, ID, or partial ID.
func (s *SecretsManager) Delete(nameOrID string) (string, error) {
err := validateSecretName(nameOrID)
if err != nil {
return "", err
}
s.lockfile.Lock()
defer s.lockfile.Unlock()
@ -325,8 +319,10 @@ func (s *SecretsManager) LookupSecretData(nameOrID string) (*Secret, []byte, err
// validateSecretName checks if the secret name is valid.
func validateSecretName(name string) error {
if !secretNameRegexp.MatchString(name) || len(name) > 253 || strings.HasSuffix(name, "-") || strings.HasSuffix(name, ".") {
return fmt.Errorf("only 253 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]: %s: %w", name, errInvalidSecretName)
if len(name) == 0 ||
len(name) > 253 ||
!secretNameRegexp.MatchString(name) {
return fmt.Errorf("secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: %w", name, errInvalidSecretName)
}
return nil
}

View File

@ -16,10 +16,6 @@ import (
)
func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) {
sock := ""
if strings.Contains(path, "/run") {
sock = strings.Split(path, "/run")[1]
}
// url.Parse NEEDS ssh://, if this ever fails or returns some nonsense, that is why.
uri, err := url.Parse(path)
if err != nil {
@ -43,15 +39,8 @@ func Validate(user *url.Userinfo, path string, port int, identity string) (*conf
uri.User = user
}
uriStr := ""
if len(sock) > 0 {
uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + "/run" + sock
} else {
uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host
}
dst := config.Destination{
URI: uriStr,
URI: uri.String(),
}
if len(identity) > 0 {

View File

@ -1 +1 @@
1.48.0
1.49.0-dev

View File

@ -191,10 +191,27 @@ type DriverWithDifferOutput struct {
TOCDigest digest.Digest
}
type DifferOutputFormat int
const (
// DifferOutputFormatDir means the output is a directory and it will
// keep the original layout.
DifferOutputFormatDir = iota
// DifferOutputFormatFlat will store the files by their checksum, in the form
// checksum[0:2]/checksum[2:]
DifferOutputFormatFlat
)
// DifferOptions overrides how the differ work
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
}
// Differ defines the interface for using a custom differ.
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
}
// DriverWithDiffer is the interface for direct diff access.

View File

@ -0,0 +1,24 @@
//go:build !linux || !composefs || !cgo
// +build !linux !composefs !cgo
package overlay
import (
"fmt"
)
// composeFsSupported reports whether composefs can be used; always false in
// this build (non-linux, or compiled without the composefs and cgo tags).
func composeFsSupported() bool {
	return false
}

// generateComposeFsBlob is a stub that always fails on builds without
// composefs support.
func generateComposeFsBlob(toc []byte, composefsDir string) error {
	return fmt.Errorf("composefs is not supported")
}

// mountComposefsBlob is a stub that always fails on builds without
// composefs support.
func mountComposefsBlob(dataDir, mountPoint string) error {
	return fmt.Errorf("composefs is not supported")
}

// enableVerityRecursive is a stub that always fails on builds without
// composefs support.
func enableVerityRecursive(path string) error {
	return fmt.Errorf("composefs is not supported")
}

View File

@ -0,0 +1,185 @@
//go:build linux && composefs && cgo
// +build linux,composefs,cgo
package overlay
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/loopback"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
var (
composeFsHelperOnce sync.Once
composeFsHelperPath string
composeFsHelperErr error
)
// getComposeFsHelper resolves the composefs-from-json binary from PATH,
// caching the result (path or lookup error) in package variables so the
// lookup runs only once per process.
func getComposeFsHelper() (string, error) {
	composeFsHelperOnce.Do(func() {
		composeFsHelperPath, composeFsHelperErr = exec.LookPath("composefs-from-json")
	})
	return composeFsHelperPath, composeFsHelperErr
}
// composeFsSupported reports whether the composefs-from-json helper binary
// can be found on this system.
func composeFsSupported() bool {
	if _, err := getComposeFsHelper(); err != nil {
		return false
	}
	return true
}
// enableVerity enables fsverity on the open file referred to by fd via the
// FS_IOC_ENABLE_VERITY ioctl (SHA-256, 4096-byte blocks). description is
// used only in the error message. EEXIST (verity already enabled) is
// treated as success.
func enableVerity(description string, fd int) error {
	enableArg := unix.FsverityEnableArg{
		Version:        1,
		Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
		Block_size:     4096,
	}
	_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
	if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
		return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
	}
	return nil
}
// enableVerityRecursive walks the tree rooted at path and enables fsverity
// on every regular file found below it, stopping at the first error.
func enableVerityRecursive(path string) error {
	return filepath.WalkDir(path, func(entry string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Only regular files can carry fsverity; skip everything else.
		if !d.Type().IsRegular() {
			return nil
		}
		f, err := os.Open(entry)
		if err != nil {
			return err
		}
		defer f.Close()
		return enableVerity(entry, int(f.Fd()))
	})
}
func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
// generateComposeFsBlob converts the JSON toc into an erofs image at
// composefsDir/composefs.blob by running the composefs-from-json helper,
// then best-effort enables fsverity on the result.
func generateComposeFsBlob(toc []byte, composefsDir string) error {
	if err := os.MkdirAll(composefsDir, 0o700); err != nil {
		return err
	}
	destFile := getComposefsBlob(composefsDir)
	writerJson, err := getComposeFsHelper()
	if err != nil {
		return fmt.Errorf("failed to find composefs-from-json: %w", err)
	}
	// O_EXCL: fail rather than silently reuse an existing blob.
	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
	if err != nil {
		return fmt.Errorf("failed to open output file: %w", err)
	}
	outFd := os.NewFile(uintptr(fd), "outFd")
	// Reopen the same file read-only via /proc/self/fd so verity can later
	// be enabled on a descriptor that is not open for writing.
	fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		outFd.Close()
		return fmt.Errorf("failed to dup output file: %w", err)
	}
	newFd := os.NewFile(uintptr(fd), "newFd")
	defer newFd.Close()
	err = func() error {
		// a scope to close outFd before setting fsverity on the read-only fd.
		defer outFd.Close()
		// The helper reads the JSON from stdin (/proc/self/fd/0) and
		// writes the erofs image to fd 3, which is outFd via ExtraFiles.
		cmd := exec.Command(writerJson, "--format=erofs", "--out=/proc/self/fd/3", "/proc/self/fd/0")
		cmd.ExtraFiles = []*os.File{outFd}
		cmd.Stderr = os.Stderr
		cmd.Stdin = bytes.NewReader(toc)
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to convert json to erofs: %w", err)
		}
		return nil
	}()
	if err != nil {
		return err
	}
	// Verity is best-effort: filesystems without support (ENOTSUP/ENOTTY)
	// are ignored, other failures only log a warning.
	if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
		logrus.Warningf("%s", err)
	}
	return nil
}
/*
typedef enum {
LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0),
} lcfs_erofs_flag_t;
struct lcfs_erofs_header_s {
uint32_t magic;
uint32_t version;
uint32_t flags;
uint32_t unused[5];
} __attribute__((__packed__));
*/
// hasACL returns true if the erofs blob has ACLs enabled
func hasACL(path string) (bool, error) {
	const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)

	fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		return false, err
	}
	defer unix.Close(fd)
	// The flags word sits at offset 8 in lcfs_erofs_header_s, right after
	// the magic and version fields.  Skip validating the magic number: an
	// invalid blob will simply fail to mount later anyway.
	var rawFlags [4]byte
	n, err := unix.Pread(fd, rawFlags[:], 8)
	if err != nil {
		return false, err
	}
	if n != len(rawFlags) {
		return false, fmt.Errorf("failed to read flags from %q", path)
	}
	return binary.LittleEndian.Uint32(rawFlags[:])&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
}
// mountComposefsBlob attaches the composefs blob stored under dataDir to a
// loop device and mounts it read-only as an erofs file system on mountPoint.
// ACL support is disabled when the blob was built without ACLs.
func mountComposefsBlob(dataDir, mountPoint string) error {
	blobFile := getComposefsBlob(dataDir)

	loop, err := loopback.AttachLoopDevice(blobFile)
	if err != nil {
		return err
	}
	defer loop.Close()

	// aclEnabled: deliberately not named like the hasACL helper, to avoid
	// shadowing the function.
	aclEnabled, err := hasACL(blobFile)
	if err != nil {
		return err
	}
	mountOpts := "ro"
	if !aclEnabled {
		mountOpts += ",noacl"
	}

	return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts)
}

View File

@ -82,6 +82,8 @@ const (
lowerFile = "lower"
maxDepth = 500
zstdChunkedManifest = "zstd-chunked-manifest"
// idLength represents the number of random characters
// which can be used to create the unique link identifier
// for every layer. If this value is too long then the
@ -780,6 +782,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
}
func (d *Driver) useNaiveDiff() bool {
if d.useComposeFs() {
return true
}
useNaiveDiffLock.Do(func() {
if d.options.mountProgram != "" {
useNaiveDiffOnly = true
@ -1431,6 +1437,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
logLevel = logrus.DebugLevel
}
optsList := options.Options
needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == ""
if len(optsList) == 0 {
optsList = strings.Split(d.options.mountOptions, ",")
} else {
@ -1499,12 +1508,76 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
idmappedMountProcessPid := -1
if needsIDMapping {
pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
idmappedMountProcessPid = int(pid)
defer cleanupFunc()
}
composefsLayers := filepath.Join(workDirBase, "composefs-layers")
if err := os.MkdirAll(composefsLayers, 0o700); err != nil {
return "", err
}
skipIDMappingLayers := make(map[string]string)
composeFsLayers := []string{}
composefsMounts := []string{}
defer func() {
for _, m := range composefsMounts {
defer unix.Unmount(m, unix.MNT_DETACH)
}
}()
maybeAddComposefsMount := func(lowerID string, i int) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID)
dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i))
if err := os.MkdirAll(dest, 0o700); err != nil {
return "", err
}
if err := mountComposefsBlob(composefsBlob, dest); err != nil {
return "", err
}
composefsMounts = append(composefsMounts, dest)
composeFsPath, err := d.getDiffPath(lowerID)
if err != nil {
return "", err
}
composeFsLayers = append(composeFsLayers, composeFsPath)
skipIDMappingLayers[composeFsPath] = composeFsPath
return dest, nil
}
diffDir := path.Join(workDirBase, "diff")
if dest, err := maybeAddComposefsMount(id, 0); err != nil {
return "", err
} else if dest != "" {
diffDir = dest
}
// For each lower, resolve its path, and append it and any additional diffN
// directories to the lowers list.
for _, l := range splitLowers {
for i, l := range splitLowers {
if l == "" {
continue
}
lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
@ -1538,6 +1611,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
lower = newpath
}
linkContent, err := os.Readlink(lower)
if err != nil {
return "", err
}
lowerID := filepath.Base(filepath.Dir(linkContent))
composefsMount, err := maybeAddComposefsMount(lowerID, i+1)
if err != nil {
return "", err
}
if composefsMount != "" {
if needsIDMapping {
if err := idmap.CreateIDMappedMount(composefsMount, composefsMount, idmappedMountProcessPid); err != nil {
return "", fmt.Errorf("create mapped mount for %q: %w", composefsMount, err)
}
skipIDMappingLayers[composefsMount] = composefsMount
// overlay takes a reference on the mount, so it is safe to unmount
// the mapped idmounts as soon as the final overlay file system is mounted.
defer unix.Unmount(composefsMount, unix.MNT_DETACH)
}
absLowers = append(absLowers, composefsMount)
continue
}
absLowers = append(absLowers, lower)
diffN = 1
_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
@ -1548,15 +1645,22 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
if len(composeFsLayers) > 0 {
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
absLowers = append(absLowers, composeFsLayers...)
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
diffDir := path.Join(workDirBase, "diff")
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
return "", err
}
@ -1596,31 +1700,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" {
if needsIDMapping {
var newAbsDir []string
idMappedMounts := make(map[string]string)
mappedRoot := filepath.Join(d.home, id, "mapped")
if err := os.MkdirAll(mappedRoot, 0o700); err != nil {
return "", err
}
pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
defer cleanupFunc()
idMappedMounts := make(map[string]string)
// rewrite the lower dirs to their idmapped mount.
c := 0
for _, absLower := range absLowers {
mappedMountSrc := getMappedMountRoot(absLower)
if _, ok := skipIDMappingLayers[absLower]; ok {
newAbsDir = append(newAbsDir, absLower)
continue
}
root, found := idMappedMounts[mappedMountSrc]
if !found {
root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
c++
if err := idmap.CreateIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
if err := idmap.CreateIDMappedMount(mappedMountSrc, root, idmappedMountProcessPid); err != nil {
return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
}
idMappedMounts[mappedMountSrc] = root
@ -1896,6 +1999,13 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
return os.RemoveAll(stagingDirectory)
}
// useComposeFs reports whether the driver should store layers as composefs
// blobs: it requires composefs support on the host and is not available
// when running rootless.
func (d *Driver) useComposeFs() bool {
	return composeFsSupported() && !unshare.IsRootless()
}
// ApplyDiff applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
var idMappings *idtools.IDMappings
@ -1928,14 +2038,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
logrus.Debugf("Applying differ in %s", applyDir)
differOptions := graphdriver.DifferOptions{
Format: graphdriver.DifferOutputFormatDir,
}
if d.useComposeFs() {
differOptions.Format = graphdriver.DifferOutputFormatFlat
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: unshare.IsRootless(),
})
}, &differOptions)
out.Target = applyDir
return out, err
}
@ -1945,17 +2063,28 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
diff, err := d.getDiffPath(id)
if d.useComposeFs() {
// FIXME: move this logic into the differ so we don't have to open
// the file twice.
if err := enableVerityRecursive(stagingDirectory); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
toc := diffOutput.BigData[zstdChunkedManifest]
if err := generateComposeFsBlob(toc, d.getComposefsData(id)); err != nil {
return err
}
}
diffPath, err := d.getDiffPath(id)
if err != nil {
return err
}
if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
return err
}
diffOutput.UncompressedDigest = diffOutput.TOCDigest
return os.Rename(stagingDirectory, diff)
return os.Rename(stagingDirectory, diffPath)
}
// DifferTarget gets the location where files are stored for the layer.
@ -2001,6 +2130,11 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
return directory.Size(applyDir)
}
// getComposefsData returns the directory holding the composefs blob for the
// layer with the given id.
func (d *Driver) getComposefsData(id string) string {
	return path.Join(d.dir(id), "composefs-data")
}
func (d *Driver) getDiffPath(id string) (string, error) {
dir, imagestore, _ := d.dir2(id)
base := dir

View File

@ -92,7 +92,10 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
return err
}
if s.Dev() != sourceStat.Dev() {
// Don't cross mount points. This ignores file mounts to avoid
// generating a diff which deletes all files following the
// mount.
if s.Dev() != sourceStat.Dev() && s.IsDir() {
return filepath.SkipDir
}

View File

@ -15,6 +15,7 @@ import (
"unsafe"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
jsoniter "github.com/json-iterator/go"
@ -109,7 +110,7 @@ func (c *layersCache) load() error {
}
bigData, err := c.store.LayerBigData(r.ID, cacheKey)
// if the cache areadly exists, read and use it
// if the cache already exists, read and use it
if err == nil {
defer bigData.Close()
metadata, err := readMetadataFromCache(bigData)
@ -122,6 +123,23 @@ func (c *layersCache) load() error {
return err
}
var lcd chunkedLayerData
clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
if clFile != nil {
cl, err := io.ReadAll(clFile)
if err != nil {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(cl, &lcd); err != nil {
return err
}
}
// otherwise create it from the layer TOC.
manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
if err != nil {
@ -134,7 +152,7 @@ func (c *layersCache) load() error {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
metadata, err := writeCache(manifest, r.ID, c.store)
metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store)
if err == nil {
c.addLayer(r.ID, metadata)
}
@ -211,13 +229,13 @@ type setBigData interface {
// - digest(file.payload))
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
// - digest(i) for each i in chunks(file payload)
func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) {
var vdata bytes.Buffer
tagLen := 0
digestLen := 0
var tagsBuffer bytes.Buffer
toc, err := prepareMetadata(manifest)
toc, err := prepareMetadata(manifest, format)
if err != nil {
return nil, err
}
@ -396,7 +414,7 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
}, nil
}
func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) {
toc, err := unmarshalToc(manifest)
if err != nil {
// ignore errors here. They might be caused by a different manifest format.
@ -404,6 +422,17 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
return nil, nil //nolint: nilnil
}
switch format {
case graphdriver.DifferOutputFormatDir:
case graphdriver.DifferOutputFormatFlat:
toc.Entries, err = makeEntriesFlat(toc.Entries)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown format %q", format)
}
var r []*internal.FileMetadata
chunkSeen := make(map[string]bool)
for i := range toc.Entries {
@ -420,6 +449,7 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
chunkSeen[cd] = true
}
}
return r, nil
}

View File

@ -28,6 +28,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
securejoin "github.com/cyphar/filepath-securejoin"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@ -41,6 +42,8 @@ const (
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
chunkedData = "zstd-chunked-data"
chunkedLayerDataKey = "zstd-chunked-layer-data"
fileTypeZstdChunked = iota
fileTypeEstargz
@ -73,6 +76,11 @@ var xattrsToIgnore = map[string]interface{}{
"security.selinux": true,
}
// chunkedLayerData is used to store additional information about the layer
type chunkedLayerData struct {
	// Format records the differ output format the layer was written with
	// (dir vs. flat), so that the layers cache can rebuild matching TOC
	// entries when it re-reads the layer later.
	Format graphdriver.DifferOutputFormat `json:"format"`
}
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
@ -241,7 +249,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str
srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
if err != nil {
return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err)
}
defer srcFile.Close()
@ -844,7 +852,14 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar
}, nil
}
func (d *destinationFile) Close() error {
func (d *destinationFile) Close() (Err error) {
defer func() {
err := d.file.Close()
if Err == nil {
Err = err
}
}()
manifestChecksum, err := digest.Parse(d.metadata.Digest)
if err != nil {
return err
@ -1317,7 +1332,39 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop
return false, nil
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
// makeEntriesFlat rewrites the TOC entries for the flat differ output
// format: every regular file is renamed to "<first-two-hex>/<rest>" of its
// content digest, and entries sharing the same digest are deduplicated.
// Non-regular entries and zero-size files without a digest are dropped; a
// non-empty file with no digest is an error.
func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) {
	// NB: the result was previously accumulated in a variable named "new",
	// which shadowed the builtin; renamed and pre-sized here.
	flat := make([]internal.FileMetadata, 0, len(mergedEntries))
	seen := make(map[string]struct{}, len(mergedEntries))
	for i := range mergedEntries {
		if mergedEntries[i].Type != TypeReg {
			continue
		}
		if mergedEntries[i].Digest == "" {
			if mergedEntries[i].Size != 0 {
				return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name)
			}
			// Zero-length files legitimately carry no digest.
			continue
		}
		parsed, err := digest.Parse(mergedEntries[i].Digest)
		if err != nil {
			return nil, err
		}
		d := parsed.Encoded()
		if _, dup := seen[d]; dup {
			continue
		}
		seen[d] = struct{}{}

		mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
		flat = append(flat, mergedEntries[i])
	}
	return flat, nil
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
defer c.layersCache.release()
defer func() {
if c.zstdReader != nil {
@ -1325,11 +1372,21 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
}()
lcd := chunkedLayerData{
Format: differOpts.Format,
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
lcdBigData, err := json.Marshal(lcd)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
output := graphdriver.DriverWithDifferOutput{
Differ: c,
TarSplit: c.tarSplit,
BigData: map[string][]byte{
bigDataKey: c.manifest,
bigDataKey: c.manifest,
chunkedLayerDataKey: lcdBigData,
},
TOCDigest: c.tocDigest,
}
@ -1389,6 +1446,21 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
defer unix.Close(dirfd)
if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
mergedEntries, err = makeEntriesFlat(mergedEntries)
if err != nil {
return output, err
}
createdDirs := make(map[string]struct{})
for _, e := range mergedEntries {
d := e.Name[0:2]
if _, found := createdDirs[d]; !found {
unix.Mkdirat(dirfd, d, 0o755)
createdDirs[d] = struct{}{}
}
}
}
// hardlinks can point to missing files. So create them after all files
// are retrieved
var hardLinks []hardLinkToCreate

View File

@ -8,75 +8,11 @@ import (
"os"
"runtime"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/idtools"
"golang.org/x/sys/unix"
)
// attr mirrors the kernel's struct mount_attr, the argument block passed to
// the mount_setattr(2) syscall by mountSetAttr below.
type attr struct {
	attrSet     uint64 // MOUNT_ATTR_* flags to set
	attrClr     uint64 // MOUNT_ATTR_* flags to clear
	propagation uint64 // mount propagation type
	userNs      uint64 // user-namespace fd, used with MOUNT_ATTR_IDMAP
}
// openTree is a wrapper for the open_tree syscall
func openTree(path string, flags int) (fd int, err error) {
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, err
	}

	r, _, errno := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0),
		uintptr(unsafe.Pointer(pathPtr)), uintptr(flags), 0, 0, 0)
	if errno != 0 {
		err = errno
	}
	return int(r), err
}
// moveMount is a wrapper for the move_mount syscall.
func moveMount(fdTree int, target string) (err error) {
	targetPtr, err := syscall.BytePtrFromString(target)
	if err != nil {
		return err
	}
	emptyPtr, err := syscall.BytePtrFromString("")
	if err != nil {
		return err
	}

	// Attach the detached tree referred to by fdTree onto target; the
	// source path is empty relative to the tree fd (MOVE_MOUNT_F_EMPTY_PATH).
	flags := unix.MOVE_MOUNT_F_EMPTY_PATH
	_, _, errno := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
		uintptr(fdTree), uintptr(unsafe.Pointer(emptyPtr)),
		0, uintptr(unsafe.Pointer(targetPtr)), uintptr(flags), 0)
	if errno != 0 {
		err = errno
	}
	return err
}
// mountSetAttr is a wrapper for the mount_setattr syscall
func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}

	_, _, errno := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR),
		uintptr(dfd), uintptr(unsafe.Pointer(pathPtr)),
		uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
	if errno != 0 {
		err = errno
	}
	return err
}
// CreateIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace
// for the PID process.
func CreateIDMappedMount(source, target string, pid int) error {
@ -85,29 +21,26 @@ func CreateIDMappedMount(source, target string, pid int) error {
if err != nil {
return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
}
var attr attr
attr.attrSet = unix.MOUNT_ATTR_IDMAP
attr.attrClr = 0
attr.propagation = 0
attr.userNs = uint64(userNsFile.Fd())
defer userNsFile.Close()
targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
if err != nil {
return err
}
defer unix.Close(targetDirFd)
if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
&attr, uint(unsafe.Sizeof(attr))); err != nil {
if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
&unix.MountAttr{
Attr_set: unix.MOUNT_ATTR_IDMAP,
Userns_fd: uint64(userNsFile.Fd()),
}); err != nil {
return err
}
if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
return err
}
return moveMount(targetDirFd, target)
return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
}
// CreateUsernsProcess forks the current process and creates a user namespace using the specified

View File

@ -7,6 +7,8 @@ import (
"os"
"strconv"
"syscall"
"golang.org/x/sys/unix"
)
// StatT type contains status of a file. It contains metadata
@ -57,6 +59,10 @@ func (s StatT) Dev() uint64 {
return s.dev
}
// IsDir reports whether s describes a directory.
func (s StatT) IsDir() bool {
	// Compare the full file-type field.  Testing just the S_IFDIR bit
	// (0x4000) would also match sockets (S_IFSOCK = 0xC000) and block
	// devices (S_IFBLK = 0x6000), whose type values include that bit.
	return (s.mode & unix.S_IFMT) == unix.S_IFDIR
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View File

@ -48,6 +48,10 @@ func (s StatT) Dev() uint64 {
return 0
}
// IsDir reports whether s describes a directory, delegating to the
// platform-independent os.FileMode returned by Mode().
func (s StatT) IsDir() bool {
	return s.Mode().IsDir()
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//