fix(deps): update module github.com/docker/docker to v28.1.0+incompatible

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
renovate[bot]
2025-04-17 19:48:18 +00:00
committed by GitHub
parent 51c4df1316
commit 05e7eeaff4
70 changed files with 1562 additions and 741 deletions

View File

@@ -293,6 +293,7 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
+Brendon Smith <bws@bws.bio>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
@@ -347,6 +348,7 @@ Casey Bisson <casey.bisson@joyent.com>
Catalin Pirvu <pirvu.catalin94@gmail.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
+Cesar Talledo <cesar.talledo@docker.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Swenson <chadswen@gmail.com>
Chance Zibolski <chance.zibolski@gmail.com>
@@ -1281,6 +1283,7 @@ Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
+Kristian Heljas <kristian@kristian.ee>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
@@ -1712,6 +1715,7 @@ Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
+Patrik Leifert <patrikleifert@hotmail.com>
pattichen <craftsbear@gmail.com>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paul <paul9869@gmail.com>

View File

@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.48"
DefaultVersion = "1.49"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
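Context for reviewers: the DefaultVersion bump means clients that pin an explicit API version must opt in to 1.49. A minimal sketch (not part of this diff) using the Go client's existing FromEnv / WithVersion options:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Pin the client to API v1.49; alternatively, use
	// client.WithAPIVersionNegotiation() to let the client fall back
	// to whatever the daemon supports.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.49"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	fmt.Println("client API version:", cli.ClientVersion())
}
```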

View File

@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.48"
basePath: "/v1.49"
info:
title: "Docker Engine API"
version: "1.48"
version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
-If you omit the version-prefix, the current version of the API (v1.48) is used.
-For example, calling `/info` is the same as calling `/v1.48/info`. Using the
+If you omit the version-prefix, the current version of the API (v1.49) is used.
+For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -6856,6 +6856,8 @@ definitions:
description: "The network pool size"
type: "integer"
example: "24"
+FirewallBackend:
+$ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -6939,6 +6941,37 @@
default: "plugins.moby"
example: "plugins.moby"
+FirewallInfo:
+description: |
+Information about the daemon's firewalling configuration.
+This field is currently only used on Linux, and omitted on other platforms.
+type: "object"
+x-nullable: true
+properties:
+Driver:
+description: |
+The name of the firewall backend driver.
+type: "string"
+example: "nftables"
+Info:
+description: |
+Information about the firewall backend, provided as
+"label" / "value" pairs.
+<p><br /></p>
+> **Note**: The information returned in this field, including the
+> formatting of values and labels, should not be considered stable,
+> and may change without notice.
+type: "array"
+items:
+type: "array"
+items:
+type: "string"
+example:
+- ["ReloadedAt", "2025-01-01T00:00:00Z"]
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
@@ -6984,32 +7017,6 @@ definitions:
type: "object"
x-nullable: true
properties:
-AllowNondistributableArtifactsCIDRs:
-description: |
-List of IP ranges to which nondistributable artifacts can be pushed,
-using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).
-<p><br /></p>
-> **Deprecated**: Pushing nondistributable artifacts is now always enabled
-> and this field is always `null`. This field will be removed in a API v1.49.
-type: "array"
-items:
-type: "string"
-example: []
-AllowNondistributableArtifactsHostnames:
-description: |
-List of registry hostnames to which nondistributable artifacts can be
-pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
-<p><br /></p>
-> **Deprecated**: Pushing nondistributable artifacts is now always enabled
-> and this field is always `null`. This field will be removed in a API v1.49.
-type: "array"
-items:
-type: "string"
-example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -7179,13 +7186,6 @@
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
-Expected:
-description: |
-Commit ID of external tool expected by dockerd as set at build time.
-**Deprecated**: This field is deprecated and will be omitted in a API v1.49.
-type: "string"
-example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
SwarmInfo:
description: |
@@ -10491,13 +10491,9 @@ paths:
### Image tarball format
-An image tarball contains one directory per image layer (named using its long ID), each containing these files:
+An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).
-- `VERSION`: currently `1.0` - the file format version
-- `json`: detailed layer information, similar to `docker inspect layer_id`
-- `layer.tar`: A tarfile containing the filesystem changes in this layer
-The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+Additionally, includes the manifest.json file associated with a backwards compatible docker save format.
If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
@@ -10537,6 +10533,7 @@
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@@ -10571,6 +10568,16 @@
type: "array"
items:
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:
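The new `platform` query parameter on `/images/get` can be exercised directly against the daemon socket. A rough sketch (not part of this diff; the image name and socket path are assumptions) using only the standard library:

```go
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Dial the local daemon's Unix socket; the "localhost" host below is a
	// placeholder required by net/http, not a real TCP endpoint.
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}
	q := url.Values{}
	q.Set("names", "alpine:latest")
	// JSON-encoded OCI platform, as documented in the swagger above.
	q.Set("platform", `{"os":"linux","architecture":"arm","variant":"v5"}`)

	resp, err := (&http.Client{Transport: tr}).Get("http://localhost/v1.49/images/get?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	_, _ = io.Copy(os.Stdout, resp.Body) // tar stream (OCI image layout)
}
```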

View File

@@ -153,6 +153,7 @@ type GetImageOpts struct {
// ImageInspectOpts holds parameters to inspect an image.
type ImageInspectOpts struct {
Manifests bool
+Platform *ocispec.Platform
}
// CommitConfig is the configuration for creating an image as part of a build.

View File

@@ -128,11 +128,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
-// Manifests is a list of image manifests available in this image. It
+// Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
-// Only available if the daemon provides a multi-platform image store.
+// Only available if the daemon provides a multi-platform image store, the client
+// requests manifests AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.

View File

@@ -106,6 +106,11 @@ type LoadOptions struct {
type InspectOptions struct {
// Manifests returns the image manifests.
Manifests bool
+// Platform selects the specific platform of a multi-platform image to inspect.
+//
+// This option is only available for API version 1.49 and up.
+Platform *ocispec.Platform
}
// SaveOptions holds parameters to save images.

View File

@@ -1,3 +1,6 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.22
package registry // import "github.com/docker/docker/api/types/registry"
import (
@@ -15,23 +18,26 @@ type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
+// ExtraFields is for internal use to include deprecated fields on older API versions.
+ExtraFields map[string]any `json:"-"`
}
// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
-func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
-tmp := map[string]interface{}{
-"InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
-"IndexConfigs": sc.IndexConfigs,
-"Mirrors": sc.Mirrors,
+func (sc *ServiceConfig) MarshalJSON() ([]byte, error) {
+type tmp ServiceConfig
+base, err := json.Marshal((*tmp)(sc))
+if err != nil {
+return nil, err
}
-if sc.AllowNondistributableArtifactsCIDRs != nil {
-tmp["AllowNondistributableArtifactsCIDRs"] = nil
+var merged map[string]any
+_ = json.Unmarshal(base, &merged)
+for k, v := range sc.ExtraFields {
+merged[k] = v
}
-if sc.AllowNondistributableArtifactsHostnames != nil {
-tmp["AllowNondistributableArtifactsHostnames"] = nil
-}
-return json.Marshal(tmp)
+return json.Marshal(merged)
}
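The rewritten MarshalJSON uses a common Go pattern: a local `type tmp ServiceConfig` sheds the MarshalJSON method so the inner json.Marshal cannot recurse, and the deprecated keys are merged in afterwards from ExtraFields. A self-contained sketch of the pattern (hypothetical Config type, not the actual registry code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Name  string
	Extra map[string]any `json:"-"`
}

func (c *Config) MarshalJSON() ([]byte, error) {
	type plain Config // local alias drops the method set: no recursion
	base, err := json.Marshal((*plain)(c))
	if err != nil {
		return nil, err
	}
	var merged map[string]any
	if err := json.Unmarshal(base, &merged); err != nil {
		return nil, err
	}
	for k, v := range c.Extra {
		merged[k] = v
	}
	return json.Marshal(merged)
}

func main() {
	b, _ := json.Marshal(&Config{Name: "x", Extra: map[string]any{"Legacy": nil}})
	fmt.Println(string(b)) // {"Legacy":null,"Name":"x"} (map keys are sorted)
}
```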
// NetIPNet is the net.IPNet type, which can be marshalled and

View File

@@ -73,6 +73,7 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
@@ -143,7 +144,7 @@ type Commit struct {
// Expected is the commit ID of external tool expected by dockerd as set at build time.
//
// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
-Expected string
+Expected string `json:",omitempty"`
}
// NetworkAddressPool is a temp struct used by [Info] struct.
@@ -151,3 +152,11 @@ type NetworkAddressPool struct {
Base string
Size int
}
+// FirewallInfo describes the firewall backend.
+type FirewallInfo struct {
+// Driver is the name of the firewall backend driver.
+Driver string `json:"Driver"`
+// Info is a list of label/value pairs, containing information related to the firewall.
+Info [][2]string `json:"Info,omitempty"`
+}
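Reading the new field from a client (a sketch, not part of this diff): FirewallBackend is nil on non-Linux daemons and on daemons older than API v1.49, so it must be nil-checked.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	// FirewallBackend is omitted on non-Linux daemons and older APIs.
	if fw := info.FirewallBackend; fw != nil {
		fmt.Println("firewall driver:", fw.Driver)
		for _, kv := range fw.Info {
			fmt.Printf("  %s = %s\n", kv[0], kv[1])
		}
	}
}
```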

View File

@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
-repository = reference.FamiliarName(ref)
+repository = ref.Name()
}
query := url.Values{}
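This change (repeated below for ImageCreate, ImagePull, ImagePush, and ImageTag) means the client now sends the fully qualified reference to the daemon instead of the shortened "familiar" form. A sketch of the difference using the reference package the client already depends on:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, _ := reference.ParseNormalizedNamed("alpine:latest")
	fmt.Println(reference.FamiliarName(ref)) // "alpine" (what was sent before)
	fmt.Println(ref.Name())                  // "docker.io/library/alpine" (what is sent now)
}
```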

View File

@@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
query.Set("tag", getAPITagFromNamedRef(ref))
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))

View File

@@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
query.Set("manifests", "1")
}
+if opts.apiOptions.Platform != nil {
+if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
+return image.InspectResponse{}, err
+}
+platform, err := encodePlatform(opts.apiOptions.Platform)
+if err != nil {
+return image.InspectResponse{}, err
+}
+query.Set("platform", platform)
+}
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/docker/docker/api/types/image"
+ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageInspectOption is a type representing functional options for the image inspect operation.
@@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
})
}
+// ImageInspectWithPlatform sets platform API option for the image inspect operation.
+// This option is only available for API version 1.49 and up.
+// With this option set, the image inspect operation will return information for the
+// specified platform variant of the multi-platform image.
+func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
+return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+clientOpts.apiOptions.Platform = platform
+return nil
+})
+}
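Hypothetical usage of the new option (the image name is illustrative; requires a daemon speaking API v1.49, otherwise the client returns a version error before sending the request):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Inspect only the linux/arm64 variant of a multi-platform image.
	resp, err := cli.ImageInspect(context.Background(), "alpine:latest",
		client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.ID, resp.Os, resp.Architecture)
}
```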
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {

View File

@@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
if !options.All {
query.Set("tag", getAPITagFromNamedRef(ref))
}

View File

@@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
return nil, errors.New("cannot push a digest reference")
}
-name := reference.FamiliarName(ref)
query := url.Values{}
if !options.All {
ref = reference.TagNameOnly(ref)
@@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
query.Set("platform", string(pJson))
}
-resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
}
-resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader)
}
if err != nil {
return nil, err

View File

@@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
ref = reference.TagNameOnly(ref)
query := url.Values{}
query.Set("repo", reference.FamiliarName(ref))
query.Set("repo", ref.Name())
if tagged, ok := ref.(reference.Tagged); ok {
query.Set("tag", tagged.Tag())
}

View File

@@ -237,7 +237,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
}
var daemonErr error
if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) {
if serverResp.Header.Get("Content-Type") == "application/json" {
var errorResponse types.ErrorResponse
if err := json.Unmarshal(body, &errorResponse); err != nil {
return errors.Wrap(err, "Error reading JSON")

File diff suppressed because it is too large

View File

@@ -0,0 +1,259 @@
// Package archive provides helper functions for dealing with archive files.
package archive
import (
"archive/tar"
"io"
"os"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
"github.com/moby/go-archive/compression"
"github.com/moby/go-archive/tarheader"
)
// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
// tar, but that do not have their own header entry.
//
// Deprecated: use [archive.ImpliedDirectoryMode] instead.
const ImpliedDirectoryMode = archive.ImpliedDirectoryMode
type (
// Compression is the state represents if compressed or not.
//
// Deprecated: use [compression.Compression] instead.
Compression = compression.Compression
// WhiteoutFormat is the format of whiteouts unpacked
//
// Deprecated: use [archive.WhiteoutFormat] instead.
WhiteoutFormat = archive.WhiteoutFormat
// TarOptions wraps the tar options.
//
// Deprecated: use [archive.TarOptions] instead.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression compression.Compression
NoLchown bool
IDMap idtools.IdentityMapping
ChownOpts *idtools.Identity
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat archive.WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
// For each include when creating an archive, the included name will be
// replaced with the matching name from this map.
RebaseNames map[string]string
InUserNS bool
// Allow unpacking to succeed in spite of failures to set extended
// attributes on the unpacked files due to the destination filesystem
// not supporting them or a lack of permissions. Extended attributes
// were probably in the archive for a reason, so set this option at
// your own peril.
BestEffortXattrs bool
}
)
// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
//
// Deprecated: use [archive.Archiver] instead.
type Archiver struct {
Untar func(io.Reader, string, *TarOptions) error
IDMapping idtools.IdentityMapping
}
// NewDefaultArchiver returns a new Archiver without any IdentityMapping
//
// Deprecated: use [archive.NewDefaultArchiver] instead.
func NewDefaultArchiver() *Archiver {
return &Archiver{Untar: Untar}
}
const (
Uncompressed = compression.None // Deprecated: use [compression.None] instead.
Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead.
Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead.
Xz = compression.Xz // Deprecated: use [compression.Xz] instead.
Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead.
)
const (
AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead.
OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead.
)
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
//
// Deprecated: use [archive.IsArchivePath] instead.
func IsArchivePath(path string) bool {
return archive.IsArchivePath(path)
}
// DetectCompression detects the compression algorithm of the source.
//
// Deprecated: use [compression.Detect] instead.
func DetectCompression(source []byte) archive.Compression {
return compression.Detect(source)
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
//
// Deprecated: use [compression.DecompressStream] instead.
func DecompressStream(arch io.Reader) (io.ReadCloser, error) {
return compression.DecompressStream(arch)
}
// CompressStream compresses the dest with specified compression algorithm.
//
// Deprecated: use [compression.CompressStream] instead.
func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) {
return compression.CompressStream(dest, comp)
}
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper.
//
// Deprecated: use [archive.TarModifierFunc] instead.
type TarModifierFunc = archive.TarModifierFunc
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream.
//
// Deprecated: use [archive.ReplaceFileTarWrapper] instead.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser {
return archive.ReplaceFileTarWrapper(inputTarStream, mods)
}
// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
//
// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead.
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
return tarheader.FileInfoHeaderNoLookups(fi, link)
}
// FileInfoHeader creates a populated Header from fi.
//
// Deprecated: use [archive.FileInfoHeader] instead.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
return archive.FileInfoHeader(name, fi, link)
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
//
// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead.
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
return archive.ReadSecurityXattrToTarHeader(path, hdr)
}
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
//
// Deprecated: use [archive.Tar] instead.
func Tar(path string, compression archive.Compression) (io.ReadCloser, error) {
return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression})
}
// TarWithOptions creates an archive with the given options.
//
// Deprecated: use [archive.TarWithOptions] instead.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
return archive.TarWithOptions(srcPath, toArchiveOpt(options))
}
// Tarballer is a lower-level interface to TarWithOptions.
//
// Deprecated: use [archive.Tarballer] instead.
type Tarballer = archive.Tarballer
// NewTarballer constructs a new tarballer using TarWithOptions.
//
// Deprecated: use [archive.Tarballer] instead.
func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) {
return archive.NewTarballer(srcPath, toArchiveOpt(options))
}
// Unpack unpacks the decompressedArchive to dest with options.
//
// Deprecated: use [archive.Unpack] instead.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options))
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
//
// Deprecated: use [archive.Untar] instead.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return archive.Untar(tarArchive, dest, toArchiveOpt(options))
}
// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
//
// Deprecated: use [archive.UntarUncompressed] instead.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options))
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, &TarOptions{
IDMap: archiver.IDMapping,
})
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).TarUntar(src, dst)
}
// UntarPath untar a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, &TarOptions{
IDMap: archiver.IDMapping,
})
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).UntarPath(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, nil)
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, nil)
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).CopyFileWithTar(src, dst)
}
// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
return archiver.IDMapping
}
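The whole file above is a deprecation shim: every identifier forwards to github.com/moby/go-archive (or its compression/tarheader subpackages), so downstream code migrates by swapping imports. A before/after sketch (the paths are illustrative):

```go
package main

import (
	"io"
	"os"

	// Before: "github.com/docker/docker/pkg/archive" provided TarWithOptions,
	// TarOptions, and the Gzip constant directly.
	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

func main() {
	// After: the same call, with Compression now coming from the
	// compression subpackage.
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression: compression.Gzip,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	f, err := os.Create("/tmp/src.tgz")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		panic(err)
	}
}
```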

View File

@@ -1,107 +0,0 @@
package archive
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/moby/sys/userns"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0o600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir == 0 {
// FIXME(thaJeztah): return a sentinel error instead of nil, nil
return nil, nil
}
opaqueXattrName := "trusted.overlay.opaque"
if userns.RunningInUserNS() {
opaqueXattrName = "user.overlay.opaque"
}
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := lgetxattr(path, opaqueXattrName)
if err != nil {
return nil, err
}
if len(opaque) != 1 || opaque[0] != 'y' {
// FIXME(thaJeztah): return a sentinel error instead of nil, nil
return nil, nil
}
delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName)
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
return &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted.
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}, nil
}
func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
opaqueXattrName := "trusted.overlay.opaque"
if userns.RunningInUserNS() {
opaqueXattrName = "user.overlay.opaque"
}
err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0)
if err != nil {
return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err)
}
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err)
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}

View File

@@ -1,7 +0,0 @@
//go:build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}

View File

@@ -1,126 +0,0 @@
//go:build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"github.com/docker/docker/pkg/idtools"
"golang.org/x/sys/unix"
)
func init() {
sysStat = statUnix
}
// addLongPathPrefix adds the Windows long path prefix to the path provided if
// it does not already have it. It is a no-op on platforms other than Windows.
func addLongPathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
// Devmajor and Devminor are only needed for special devices.
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
// ZFS in particular does not override the default:
// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
// Such large values cannot be encoded in a tar header.
if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
return nil
}
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
return nil
}
func getInodeFromStat(stat interface{}) (uint64, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
// FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897
return 0, nil
}
return s.Ino, nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo.
//
// Creating device nodes is not supported when running in a user namespace,
// produces a [syscall.EPERM] in most cases.
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 0o7777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}

View File

@@ -1,69 +0,0 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/idtools"
)
// longPathPrefix is the longpath prefix for Windows file paths.
const longPathPrefix = `\\?\`
// addLongPathPrefix adds the Windows long path prefix to the path provided if
// it does not already have it. It is a no-op on platforms other than Windows.
//
// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix].
func addLongPathPrefix(srcPath string) string {
if strings.HasPrefix(srcPath, longPathPrefix) {
return srcPath
}
if strings.HasPrefix(srcPath, `\\`) {
// This is a UNC path, so we need to add 'UNC' to the path as well.
return longPathPrefix + `UNC` + srcPath[1:]
}
return longPathPrefix + srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
// Remove group- and world-writable bits.
perm &= 0o755
// Add the x bit: make everything +x on Windows
return perm | 0o111
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (uint64, error) {
// do nothing. no notion of Inode in stat on Windows
return 0, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// no notion of file ownership mapping yet on Windows
return idtools.Identity{UID: 0, GID: 0}, nil
}

View File

@@ -1,430 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/containerd/log"
"github.com/docker/docker/pkg/idtools"
)
// ChangeType represents the change type.
type ChangeType int
const (
ChangeModify = 0 // ChangeModify represents the modify operation.
ChangeAdd = 1 // ChangeAdd represents the add operation.
ChangeDelete = 2 // ChangeDelete represents the delete operation.
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar doesn't have sub-second mtime precision. The go tar
// writer (1.10+) does when using PAX format, but we round times to seconds
// to ensure archives have the same hashes for backwards compatibility.
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
//
// Non-sub-second is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return skip, err
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type (
skipChange func(string) (bool, error)
deleteChange func(string, string, os.FileInfo) (string, error)
)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat fs.FileInfo
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var oldRoot, newRoot *FileInfo
if oldDir == "" {
emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idMap, writer, nil)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
log.G(context.TODO()).Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
log.G(context.TODO()).Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}

View File

@@ -0,0 +1,56 @@
package archive
import (
"io"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
)
// ChangeType represents the change
//
// Deprecated: use [archive.ChangeType] instead.
type ChangeType = archive.ChangeType
const (
ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead.
ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead.
ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead.
)
// Change represents a change.
//
// Deprecated: use [archive.Change] instead.
type Change = archive.Change
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
//
// Deprecated: use [archive.Changes] instead.
func Changes(layers []string, rw string) ([]archive.Change, error) {
return archive.Changes(layers, rw)
}
// FileInfo describes the information of a file.
//
// Deprecated: use [archive.FileInfo] instead.
type FileInfo = archive.FileInfo
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
//
// Deprecated: use [archive.ChangesDirs] instead.
func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) {
return archive.ChangesDirs(newDir, oldDir)
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
//
// Deprecated: use [archive.ChangesSize] instead.
func ChangesSize(newDir string, changes []archive.Change) int64 {
return archive.ChangesSize(newDir, changes)
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap))
}

View File

@@ -1,281 +0,0 @@
package archive
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
info.stat = fi
info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch strings.Compare(ni1.name, ni2.name) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
name := string(b[0:clen(b[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}

View File

@@ -1,95 +0,0 @@
//go:build !linux
package archive
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
s, err := os.Lstat(path)
if err != nil {
return err
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
stat: s,
}
info.capability, _ = lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}

View File

@@ -1,43 +0,0 @@
//go:build !windows
package archive
import (
"io/fs"
"os"
"syscall"
)
func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
oldSys := oldStat.Sys().(*syscall.Stat_t)
newSys := newStat.Sys().(*syscall.Stat_t)
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldSys.Uid != newSys.Uid ||
oldSys.Gid != newSys.Gid ||
oldSys.Rdev != newSys.Rdev ||
// Don't look at size or modification time for dirs, its not a good
// measure of change. See https://github.com/moby/moby/issues/9874
// for a description of the issue with modification time, and
// https://github.com/moby/moby/pull/11422 for the change.
// (Note that in the Windows implementation of this function,
// modification time IS taken as a change). See
// https://github.com/moby/moby/pull/37982 for more information.
(!oldStat.Mode().IsDir() &&
(!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) uint64 {
return fi.Sys().(*syscall.Stat_t).Ino
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

@@ -1,33 +0,0 @@
package archive
import (
"io/fs"
"os"
)
func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
// Note there is slight difference between the Linux and Windows
// implementations here. Due to https://github.com/moby/moby/issues/9874,
// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
// consider a change to the directory time as a change. Windows on NTFS
// does. See https://github.com/moby/moby/pull/37982 for more information.
if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

@@ -1,497 +0,0 @@
package archive
import (
"archive/tar"
"context"
"errors"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/containerd/log"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
var copyPool = sync.Pool{
New: func() interface{} { s := make([]byte, 32*1024); return &s },
}
func copyWithBuffer(dst io.Writer, src io.Reader) error {
buf := copyPool.Get().(*[]byte)
_, err := io.CopyBuffer(dst, src, *buf)
copyPool.Put(buf)
return err
}
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
// Ensure paths are in platform semantics
cleanedPath = normalizePath(cleanedPath)
originalPath = normalizePath(originalPath)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
return len(path) > 0 && path[len(path)-1] == filepath.Separator
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(filepath.FromSlash(path))
if specifiesCurrentDir(path) {
cleanedPath += string(os.PathSeparator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
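A hypothetical illustration of SplitPathDirEntry through the exported function in github.com/moby/go-archive, assuming Unix path semantics: the trailing "." survives the split, so callers can tell that the original path asserted a directory.
package main

import (
	"fmt"

	"github.com/moby/go-archive"
)

func main() {
	dir, base := archive.SplitPathDirEntry("/foo/bar")
	fmt.Println(dir, base) // /foo bar
	dir, base = archive.SplitPathDirEntry("/foo/bar/.")
	fmt.Println(dir, base) // /foo/bar .
}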
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
sourcePath = normalizePath(sourcePath)
if _, err := os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return nil, err
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}
// TarResourceRebaseOpts does not perform the tar itself, but instead just creates
// the rebase parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbol link
// we will use the target file instead of the symbol link if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !filepath.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Stat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, io.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
// srcContent tar stream, as served by TarWithOptions(), is
// definitely in PAX format, but tar.Next() mistakenly guesses it
// as USTAR, which creates a problem: if the newBase is >100
// characters long, WriteHeader() returns an error like
// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
//
// To fix, set the format to PAX here. See docker/for-linux issue #484.
hdr.Format = tar.FormatPAX
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
}
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
// Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
// and https://cure53.de/pentest-report_opa.pdf, which recommends to
// replace io.Copy with io.CopyN. The latter allows specifying the
// maximum number of bytes that should be read. By properly defining
// the limit, it can be assured that a GZip compression bomb cannot
// easily cause a Denial-of-Service.
// After reviewing with @tonistiigi and @cpuguy83, this should not
// affect us, because here we do not read into memory, hence should
// not be vulnerable to this code consuming memory.
//nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
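A usage sketch of RebaseArchiveEntries via the exported function in github.com/moby/go-archive; the in-memory one-entry tar stands in for the output of TarWithOptions.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"

	"github.com/moby/go-archive"
)

func main() {
	// Build a one-entry tar in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "old/file.txt", Mode: 0o644, Size: 5}); err != nil {
		panic(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		panic(err)
	}
	tw.Close()

	// Rewrite the leading "old" path element to "new".
	rebased := archive.RebaseArchiveEntries(&buf, "old", "new")
	defer rebased.Close()

	hdr, err := tar.NewReader(rebased).Next()
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name) // new/file.txt
}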
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
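A minimal usage sketch of CopyResource through github.com/moby/go-archive; the paths are hypothetical. A trailing "/." on the source would copy the directory's contents rather than the directory itself, per the trailing-dot handling above.
package main

import "github.com/moby/go-archive"

func main() {
	// Copy /tmp/src to /tmp/dst, following symlinks in the source path.
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		panic(err)
	}
}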
// ResolveHostSourcePath decides the real path to be copied, depending on
// whether symlinks should be followed: if followLink is true, resolvedPath
// is the link target of any symlink file; otherwise only symlinks in the
// parent directory are resolved, and a symlink file itself is returned
// without being resolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
if followLink {
var err error
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return "", "", err
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not follow symbol link, then resolve symbol link of parent dir
resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
if err != nil {
return "", "", err
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) &&
filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, returning
// the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separators or "."),
// so we can manually re-append them below.
var rebaseName string
if specifiesCurrentDir(path) &&
!specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) &&
!hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}

@@ -0,0 +1,130 @@
package archive
import (
"io"
"github.com/moby/go-archive"
"github.com/moby/go-archive/compression"
)
var (
ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead.
ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead.
ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead.
ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead.
)
// PreserveTrailingDotOrSeparator returns the given cleaned path.
//
// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath)
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename.
//
// Deprecated: use [archive.SplitPathDirEntry] instead.
func SplitPathDirEntry(path string) (dir, base string) {
return archive.SplitPathDirEntry(path)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive.
//
// Deprecated: use [archive.TarResource] instead.
func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) {
return archive.TarResource(sourceInfo)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
//
// Deprecated: use [archive.TarResourceRebase] instead.
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
return archive.TarResourceRebase(sourcePath, rebaseName)
}
// TarResourceRebaseOpts does not perform the tar itself, but instead just creates
// the rebase parameters to be sent to TarWithOptions.
//
// Deprecated: use [archive.TarResourceRebaseOpts] instead.
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: compression.None,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source or destination path of a copy operation.
//
// Deprecated: use [archive.CopyInfo] instead.
type CopyInfo = archive.CopyInfo
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation.
//
// Deprecated: use [archive.CopyInfoSourcePath] instead.
func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) {
return archive.CopyInfoSourcePath(path, followLink)
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation.
//
// Deprecated: use [archive.CopyInfoDestinationPath] instead.
func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) {
return archive.CopyInfoDestinationPath(path)
}
// PrepareArchiveCopy prepares the given srcContent archive.
//
// Deprecated: use [archive.PrepareArchiveCopy] instead.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) {
return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo)
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
//
// Deprecated: use [archive.RebaseArchiveEntries] instead.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
return archive.RebaseArchiveEntries(srcContent, oldBase, newBase)
}
// CopyResource performs an archive copy from the given source path to the
// given destination path.
//
// Deprecated: use [archive.CopyResource] instead.
func CopyResource(srcPath, dstPath string, followLink bool) error {
return archive.CopyResource(srcPath, dstPath, followLink)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
//
// Deprecated: use [archive.CopyTo] instead.
func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error {
return archive.CopyTo(content, srcInfo, dstPath)
}
// ResolveHostSourcePath decides the real path to be copied.
//
// Deprecated: use [archive.ResolveHostSourcePath] instead.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
return archive.ResolveHostSourcePath(path, followLink)
}
// GetRebaseName normalizes and compares path and resolvedPath.
//
// Deprecated: use [archive.GetRebaseName] instead.
func GetRebaseName(path, resolvedPath string) (string, string) {
return archive.GetRebaseName(path, resolvedPath)
}
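Every wrapper in this file forwards to github.com/moby/go-archive, so migrating callers is an import swap; a hypothetical before/after sketch:
package main

import (
	// was: "github.com/docker/docker/pkg/archive"
	"github.com/moby/go-archive"
)

func main() {
	srcInfo, err := archive.CopyInfoSourcePath("/tmp/src", false)
	if err != nil {
		panic(err)
	}
	content, err := archive.TarResource(srcInfo)
	if err != nil {
		panic(err)
	}
	defer content.Close()
	// content now streams an uncompressed tar of /tmp/src.
}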

@@ -1,11 +0,0 @@
//go:build !windows
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

@@ -1,9 +0,0 @@
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

@@ -1,7 +0,0 @@
//go:build freebsd
package archive
import "golang.org/x/sys/unix"
var mknod = unix.Mknod

@@ -1,9 +0,0 @@
//go:build !windows && !freebsd
package archive
import "golang.org/x/sys/unix"
func mknod(path string, mode uint32, dev uint64) error {
return unix.Mknod(path, mode, int(dev))
}

@@ -1,258 +0,0 @@
package archive
import (
"archive/tar"
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/containerd/log"
)
// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Ensure that the parent directory exists.
err = createImpliedDirectories(dest, hdr, options)
if err != nil {
return 0, err
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
// #nosec G305 -- The joined path is guarded against path traversal.
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: as these operations are platform-specific, so must the slash be.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
return os.RemoveAll(path)
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
srcData := io.Reader(tr)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := remapIDs(options.IDMap, srcHdr); err != nil {
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
// #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
path := filepath.Join(dest, hdr.Name)
if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
func IsEmpty(rd io.Reader) (bool, error) {
decompRd, err := DecompressStream(rd)
if err != nil {
return true, fmt.Errorf("failed to decompress archive: %v", err)
}
defer decompRd.Close()
tarReader := tar.NewReader(decompRd)
if _, err := tarReader.Next(); err != nil {
if err == io.EOF {
return true, nil
}
return false, fmt.Errorf("failed to read next archive header: %v", err)
}
return false, nil
}
// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
restore := overrideUmask(0)
defer restore()
if decompress {
decompLayer, err := DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompLayer.Close()
layer = decompLayer
}
return UnpackLayer(dest, layer, options)
}
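A usage sketch of ApplyLayer via github.com/moby/go-archive; the file name is hypothetical, and the stream may be compressed since applyLayerHandler decompresses it before unpacking.
package main

import (
	"fmt"
	"os"

	"github.com/moby/go-archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical layer blob
	if err != nil {
		panic(err)
	}
	defer f.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		panic(err)
	}
	fmt.Println("unpacked", size, "bytes")
}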

@@ -0,0 +1,37 @@
package archive
import (
"io"
"github.com/moby/go-archive"
)
// UnpackLayer unpacks `layer` to `dest`.
//
// Deprecated: use [archive.UnpackLayer] instead.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
return archive.UnpackLayer(dest, layer, toArchiveOpt(options))
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`.
//
// Deprecated: use [archive.ApplyLayer] instead.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return archive.ApplyLayer(dest, layer)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`.
//
// Deprecated: use [archive.ApplyUncompressedLayer] instead.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options))
}
// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
//
// Deprecated: use [archive.IsEmpty] instead.
func IsEmpty(rd io.Reader) (bool, error) {
return archive.IsEmpty(rd)
}

@@ -1,21 +0,0 @@
//go:build !windows
package archive
import "golang.org/x/sys/unix"
// overrideUmask sets current process's file mode creation mask to newmask
// and returns a function to restore it.
//
// WARNING for readers stumbling upon this code. Changing umask in a multi-
// threaded environment isn't safe. Don't use this without understanding the
// risks, and don't export this function for others to use (we shouldn't even
// be using this ourself).
//
// FIXME(thaJeztah): we should get rid of these hacks if possible.
func overrideUmask(newMask int) func() {
oldMask := unix.Umask(newMask)
return func() {
unix.Umask(oldMask)
}
}

@@ -1,6 +0,0 @@
package archive
// overrideUmask is a no-op on windows.
func overrideUmask(newmask int) func() {
return func() {}
}

@@ -1,20 +0,0 @@
package archive
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive.
// On Linux: this is a no-op.
// On Windows: it verifies and manipulates the path as follows.
// This is used, for example, when validating a user-provided path in docker cp.
// If a drive letter is supplied, it must be the system drive. The drive letter
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
// need the path in this syntax so that it can ultimately be concatenated with
// a Windows long-path which doesn't support drive-letters. Examples:
// C: --> Fail
// C:\ --> \
// a --> a
// /a --> \a
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return checkSystemDriveAndRemoveDriveLetter(path)
}

@@ -0,0 +1,10 @@
package archive
import "github.com/moby/go-archive"
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, is on the system drive.
//
// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead.
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return archive.CheckSystemDriveAndRemoveDriveLetter(path)
}

@@ -1,9 +0,0 @@
//go:build !windows
package archive
// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
// of CheckSystemDriveAndRemoveDriveLetter
func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return path, nil
}

@@ -1,22 +0,0 @@
package archive
import (
"fmt"
"path/filepath"
"strings"
)
// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
// of CheckSystemDriveAndRemoveDriveLetter
func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
return "", fmt.Errorf("no relative path specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
return "", fmt.Errorf("the specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}

@@ -1,38 +0,0 @@
package archive
import (
"syscall"
"time"
"unsafe"
)
var (
minTime = time.Unix(0, 0)
maxTime time.Time
)
func init() {
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
// This is a 64 bit timespec
// os.Chtimes limits time to the following
maxTime = time.Unix(0, 1<<63-1)
} else {
// This is a 32 bit timespec
maxTime = time.Unix(1<<31-1, 0)
}
}
func boundTime(t time.Time) time.Time {
if t.Before(minTime) || t.After(maxTime) {
return minTime
}
return t
}
func latestTime(t1, t2 time.Time) time.Time {
if t1.Before(t2) {
return t2
}
return t1
}
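A hypothetical in-package sketch of the clamping, assuming it compiles alongside the unexported helpers above: timestamps a tar header can carry but os.Chtimes cannot represent are forced back to the epoch.
func boundTimeExample() time.Time {
	tooLate := time.Unix(1<<62, 0) // beyond maxTime for either timespec width
	return boundTime(tooLate)      // clamped to minTime, the Unix epoch
}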

@@ -1,41 +0,0 @@
//go:build !windows
package archive
import (
"os"
"time"
"golang.org/x/sys/unix"
)
// chtimes changes the access time and modified time of a file at the given path.
// If the modified time is prior to the Unix Epoch (minTime), or after the
// end of Unix Time (maxTime), os.Chtimes has undefined behavior. In this
// case, chtimes defaults to the Unix Epoch, just in case.
func chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func timeToTimespec(time time.Time) unix.Timespec {
if time.IsZero() {
// Return UTIME_OMIT special value
return unix.Timespec{
Sec: 0,
Nsec: (1 << 30) - 2,
}
}
return unix.NsecToTimespec(time.UnixNano())
}
func lchtimes(name string, atime time.Time, mtime time.Time) error {
utimes := [2]unix.Timespec{
timeToTimespec(atime),
timeToTimespec(mtime),
}
err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW)
if err != nil && err != unix.ENOSYS {
return err
}
return err
}

@@ -1,32 +0,0 @@
package archive
import (
"os"
"time"
"golang.org/x/sys/windows"
)
func chtimes(name string, atime time.Time, mtime time.Time) error {
if err := os.Chtimes(name, atime, mtime); err != nil {
return err
}
pathp, err := windows.UTF16PtrFromString(name)
if err != nil {
return err
}
h, err := windows.CreateFile(pathp,
windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return err
}
defer windows.Close(h)
c := windows.NsecToFiletime(mtime.UnixNano())
return windows.SetFileTime(h, &c, nil, nil)
}
func lchtimes(name string, atime time.Time, mtime time.Time) error {
return nil
}

vendor/github.com/docker/docker/pkg/archive/utils.go
@@ -0,0 +1,42 @@
package archive
import (
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
)
// ToArchiveOpt converts a [TarOptions] to an [archive.TarOptions].
//
// Deprecated: use [archive.TarOptions] instead, this utility is for internal use to transition to the [github.com/moby/go-archive] module.
func ToArchiveOpt(options *TarOptions) *archive.TarOptions {
return toArchiveOpt(options)
}
func toArchiveOpt(options *TarOptions) *archive.TarOptions {
if options == nil {
return nil
}
var chownOpts *archive.ChownOpts
if options.ChownOpts != nil {
chownOpts = &archive.ChownOpts{
UID: options.ChownOpts.UID,
GID: options.ChownOpts.GID,
}
}
return &archive.TarOptions{
IncludeFiles: options.IncludeFiles,
ExcludePatterns: options.ExcludePatterns,
Compression: options.Compression,
NoLchown: options.NoLchown,
IDMap: idtools.ToUserIdentityMapping(options.IDMap),
ChownOpts: chownOpts,
IncludeSourceDir: options.IncludeSourceDir,
WhiteoutFormat: options.WhiteoutFormat,
NoOverwriteDirNonDir: options.NoOverwriteDirNonDir,
RebaseNames: options.RebaseNames,
InUserNS: options.InUserNS,
BestEffortXattrs: options.BestEffortXattrs,
}
}
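A hypothetical sketch of the transition helper in use: a legacy TarOptions value is bridged field-by-field onto the github.com/moby/go-archive options type.
package main

import (
	dockerarchive "github.com/docker/docker/pkg/archive"
)

func main() {
	legacy := &dockerarchive.TarOptions{NoLchown: true}
	opts := dockerarchive.ToArchiveOpt(legacy) // *archive.TarOptions from github.com/moby/go-archive
	_ = opts
}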

@@ -1,23 +0,0 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
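A small sketch of the whiteout convention using the exported constants from github.com/moby/go-archive: deleting dir/file in an upper layer is recorded as a marker entry whose basename carries the whiteout prefix.
package main

import (
	"fmt"
	"path"

	"github.com/moby/go-archive"
)

func main() {
	deleted := "dir/file"
	marker := path.Join(path.Dir(deleted), archive.WhiteoutPrefix+path.Base(deleted))
	fmt.Println(marker) // dir/.wh.file
}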

@@ -0,0 +1,10 @@
package archive
import "github.com/moby/go-archive"
const (
WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead.
WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead.
WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead.
WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead.
)

@@ -1,59 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// - ./foo.txt with content "hello world"
// - ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) [][2]string {
output := make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return output
}
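A usage sketch of Generate via github.com/moby/go-archive, matching the doc comment above: the final odd argument yields an entry with empty content.
package main

import (
	"fmt"
	"io"

	"github.com/moby/go-archive"
)

func main() {
	rd, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, rd)
	fmt.Println("tar bytes:", n)
}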

@@ -0,0 +1,14 @@
package archive
import (
"io"
"github.com/moby/go-archive"
)
// Generate generates a new archive from the content provided as input.
//
// Deprecated: use [archive.Generate] instead.
func Generate(input ...string) (io.Reader, error) {
return archive.Generate(input...)
}

@@ -1,52 +0,0 @@
//go:build linux || darwin || freebsd || netbsd
package archive
import (
"errors"
"fmt"
"io/fs"
"golang.org/x/sys/unix"
)
// lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// It returns a nil slice and nil error if the xattr is not set.
func lgetxattr(path string, attr string) ([]byte, error) {
// Start with a 128 length byte array
dest := make([]byte, 128)
sz, err := unix.Lgetxattr(path, attr, dest)
for errors.Is(err, unix.ERANGE) {
// Buffer too small, use zero-sized buffer to get the actual size
sz, err = unix.Lgetxattr(path, attr, []byte{})
if err != nil {
return nil, wrapPathError("lgetxattr", path, attr, err)
}
dest = make([]byte, sz)
sz, err = unix.Lgetxattr(path, attr, dest)
}
if err != nil {
if errors.Is(err, noattr) {
return nil, nil
}
return nil, wrapPathError("lgetxattr", path, attr, err)
}
return dest[:sz], nil
}
// lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func lsetxattr(path string, attr string, data []byte, flags int) error {
return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags))
}
func wrapPathError(op, path, attr string, err error) error {
if err == nil {
return nil
}
return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)}
}
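A hypothetical in-package sketch of lgetxattr as the change-detection code uses it for security.capability: a nil slice with a nil error means the attribute is simply unset.
func capabilityExample(path string) ([]byte, error) {
	// The ERANGE retry loop above already handles values larger than the
	// initial 128-byte buffer, so callers only distinguish unset from error.
	return lgetxattr(path, "security.capability")
}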

@@ -1,5 +0,0 @@
package archive
import "golang.org/x/sys/unix"
var noattr = unix.ENODATA

@@ -1,7 +0,0 @@
//go:build !linux && !windows
package archive
import "golang.org/x/sys/unix"
var noattr = unix.ENOATTR

@@ -1,11 +0,0 @@
//go:build !linux && !darwin && !freebsd && !netbsd
package archive
func lgetxattr(path string, attr string) ([]byte, error) {
return nil, nil
}
func lsetxattr(path string, attr string, data []byte, flags int) error {
return nil
}

@@ -3,11 +3,15 @@ package idtools
import (
"fmt"
"os"
"github.com/moby/sys/user"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
//
// Deprecated: use [user.IDMap] instead.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
@@ -17,28 +21,42 @@ type IDMap struct {
// MkdirAllAndChown creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership and permissions.
//
// Deprecated: use [user.MkdirAllAndChown] instead.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, true)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership and permissions.
// Note that unlike os.Mkdir(), this function does not return IsExist error
// in case path already exists.
//
// Deprecated: use [user.MkdirAndChown] instead.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, false, true)
return user.MkdirAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership or permissions will be performed
//
// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead.
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, false)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
//
// Deprecated: use [(user.IdentityMapping).RootPair] instead.
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return getRootUIDGID(uidMap, gidMap)
}
// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
@@ -101,11 +119,61 @@ type IdentityMapping struct {
GIDMaps []IDMap `json:"GIDMaps"`
}
// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping].
//
// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package.
func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping {
return IdentityMapping{
UIDMaps: fromUserIDMap(u.UIDMaps),
GIDMaps: fromUserIDMap(u.GIDMaps),
}
}
func fromUserIDMap(u []user.IDMap) []IDMap {
if u == nil {
return nil
}
m := make([]IDMap, len(u))
for i := range u {
m[i] = IDMap{
ContainerID: int(u[i].ID),
HostID: int(u[i].ParentID),
Size: int(u[i].Count),
}
}
return m
}
// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping].
//
// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package.
func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping {
return user.IdentityMapping{
UIDMaps: toUserIDMap(u.UIDMaps),
GIDMaps: toUserIDMap(u.GIDMaps),
}
}
func toUserIDMap(u []IDMap) []user.IDMap {
if u == nil {
return nil
}
m := make([]user.IDMap, len(u))
for i := range u {
m[i] = user.IDMap{
ID: int64(u[i].ContainerID),
ParentID: int64(u[i].HostID),
Count: int64(u[i].Size),
}
}
return m
}
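A hypothetical sketch of the conversion helpers above: a single remapping entry, container IDs 0-65535 onto host IDs starting at 100000, translated to the github.com/moby/sys/user representation.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	m := idtools.IdentityMapping{
		UIDMaps: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		GIDMaps: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	}
	um := idtools.ToUserIdentityMapping(m)
	fmt.Println(um.UIDMaps[0].ParentID, um.UIDMaps[0].Count) // 100000 65536
}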
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i IdentityMapping) RootPair() Identity {
uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps)
uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
return Identity{UID: uid, GID: gid}
}
@@ -144,6 +212,8 @@ func (i IdentityMapping) Empty() bool {
}
// CurrentIdentity returns the identity of the current process
//
// Deprecated: use [os.Getuid] and [os.Getegid] instead.
func CurrentIdentity() Identity {
return Identity{UID: os.Getuid(), GID: os.Getegid()}
}

@@ -1,166 +0,0 @@
//go:build !windows
package idtools
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"github.com/moby/sys/user"
)
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
path, err := filepath.Abs(path)
if err != nil {
return err
}
stat, err := os.Stat(path)
if err == nil {
if !stat.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if !chownExisting {
return nil
}
// short-circuit -- we were called with an existing directory and chown was requested
return setPermissions(path, mode, owner, stat)
}
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if os.IsNotExist(err) {
paths = []string{path}
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err = os.MkdirAll(path, mode); err != nil {
return err
}
} else if err = os.Mkdir(path, mode); err != nil {
return err
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err = setPermissions(pathComponent, mode, owner, nil); err != nil {
return err
}
}
return nil
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username
//
// Deprecated: use [user.LookupUser] instead
func LookupUser(name string) (user.User, error) {
return user.LookupUser(name)
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid
//
// Deprecated: use [user.LookupUid] instead
func LookupUID(uid int) (user.User, error) {
return user.LookupUid(uid)
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
//
// Deprecated: use [user.LookupGroup] instead
func LookupGroup(name string) (user.Group, error) {
return user.LookupGroup(name)
}
// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
// Likewise for setting permissions.
func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error {
if stat == nil {
var err error
stat, err = os.Stat(p)
if err != nil {
return err
}
}
if stat.Mode().Perm() != mode.Perm() {
if err := os.Chmod(p, mode.Perm()); err != nil {
return err
}
}
ssi := stat.Sys().(*syscall.Stat_t)
if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) {
return nil
}
return os.Chown(p, owner.UID, owner.GID)
}
// LoadIdentityMapping takes a requested username and,
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func LoadIdentityMapping(name string) (IdentityMapping, error) {
// TODO: Consider adding support for calling out to "getent"
usr, err := user.LookupUser(name)
if err != nil {
return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err)
}
subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
if err != nil {
return IdentityMapping{}, err
}
subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
if err != nil {
return IdentityMapping{}, err
}
return IdentityMapping{
UIDMaps: subuidRanges,
GIDMaps: subgidRanges,
}, nil
}
func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) {
uidstr := strconv.Itoa(usr.Uid)
rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool {
return sid.Name == usr.Name || sid.Name == uidstr
})
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
}
idMap := []IDMap{}
containerID := 0
for _, idrange := range rangeList {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: int(idrange.SubID),
Size: int(idrange.Count),
})
containerID = containerID + int(idrange.Count)
}
return idMap, nil
}
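A hypothetical in-package sketch: given an /etc/subuid line such as alice:100000:65536, lookupSubRangesFile yields IDMap{ContainerID: 0, HostID: 100000, Size: 65536}, with ContainerID continuing where each previous range's count left off.
func loadExample() error {
	m, err := LoadIdentityMapping("alice")
	if err != nil {
		return err
	}
	// With the single subuid line above, m.UIDMaps would contain
	// IDMap{ContainerID: 0, HostID: 100000, Size: 65536}.
	_ = m
	return nil
}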

@@ -1,9 +1,5 @@
package idtools
import (
"os"
)
const (
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
@@ -14,11 +10,3 @@ const (
ContainerUserSidString = "S-1-5-93-2-2"
)
// This is currently a wrapper around [os.MkdirAll]; since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be supported here
// too.
func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error {
return os.MkdirAll(path, 0)
}