Merge pull request #25916 from containers/renovate/github.com-docker-docker-28.x

fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
openshift-merge-bot[bot] authored on 2025-04-22 11:43:06 +00:00; committed by GitHub.
70 changed files with 1562 additions and 741 deletions

go.mod (4 changed lines)

@@ -27,7 +27,7 @@ require (
github.com/cyphar/filepath-securejoin v0.4.1
github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v28.0.4+incompatible
github.com/docker/docker v28.1.0+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
github.com/docker/go-units v0.5.0
@@ -153,7 +153,9 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.20.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect

go.sum (8 changed lines)

@@ -121,8 +121,8 @@ github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1
github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok=
github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.1.0+incompatible h1:4iqpcWQCt3Txcz7iWIb1U3SZ/n9ffo4U+ryY5/3eOp0=
github.com/docker/docker v28.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -322,8 +322,12 @@ github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA=
github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=


@@ -293,6 +293,7 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
Brendon Smith <bws@bws.bio>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
@@ -347,6 +348,7 @@ Casey Bisson <casey.bisson@joyent.com>
Catalin Pirvu <pirvu.catalin94@gmail.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
Cesar Talledo <cesar.talledo@docker.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Swenson <chadswen@gmail.com>
Chance Zibolski <chance.zibolski@gmail.com>
@@ -1281,6 +1283,7 @@ Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
Kristian Heljas <kristian@kristian.ee>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
@@ -1712,6 +1715,7 @@ Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
Patrik Leifert <patrikleifert@hotmail.com>
pattichen <craftsbear@gmail.com>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paul <paul9869@gmail.com>


@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.48"
DefaultVersion = "1.49"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon


@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.48"
basePath: "/v1.49"
info:
title: "Docker Engine API"
version: "1.48"
version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
is returned.
If you omit the version-prefix, the current version of the API (v1.48) is used.
For example, calling `/info` is the same as calling `/v1.48/info`. Using the
If you omit the version-prefix, the current version of the API (v1.49) is used.
For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -6856,6 +6856,8 @@ definitions:
description: "The network pool size"
type: "integer"
example: "24"
FirewallBackend:
$ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -6939,6 +6941,37 @@ definitions:
default: "plugins.moby"
example: "plugins.moby"
FirewallInfo:
description: |
Information about the daemon's firewalling configuration.
This field is currently only used on Linux, and omitted on other platforms.
type: "object"
x-nullable: true
properties:
Driver:
description: |
The name of the firewall backend driver.
type: "string"
example: "nftables"
Info:
description: |
Information about the firewall backend, provided as
"label" / "value" pairs.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["ReloadedAt", "2025-01-01T00:00:00Z"]
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
@@ -6984,32 +7017,6 @@ definitions:
type: "object"
x-nullable: true
properties:
AllowNondistributableArtifactsCIDRs:
description: |
List of IP ranges to which nondistributable artifacts can be pushed,
using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).
<p><br /></p>
> **Deprecated**: Pushing nondistributable artifacts is now always enabled
> and this field is always `null`. This field will be removed in API v1.49.
type: "array"
items:
type: "string"
example: []
AllowNondistributableArtifactsHostnames:
description: |
List of registry hostnames to which nondistributable artifacts can be
pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
<p><br /></p>
> **Deprecated**: Pushing nondistributable artifacts is now always enabled
> and this field is always `null`. This field will be removed in API v1.49.
type: "array"
items:
type: "string"
example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -7179,13 +7186,6 @@ definitions:
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
Expected:
description: |
Commit ID of external tool expected by dockerd as set at build time.
**Deprecated**: This field is deprecated and will be omitted in API v1.49.
type: "string"
example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
SwarmInfo:
description: |
@@ -10491,13 +10491,9 @@ paths:
### Image tarball format
An image tarball contains one directory per image layer (named using its long ID), each containing these files:
An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).
- `VERSION`: currently `1.0` - the file format version
- `json`: detailed layer information, similar to `docker inspect layer_id`
- `layer.tar`: A tarfile containing the filesystem changes in this layer
The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
Additionally, the tarball includes a `manifest.json` file associated with the backwards-compatible `docker save` format.
If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
@@ -10537,6 +10533,7 @@ paths:
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@@ -10571,6 +10568,16 @@ paths:
type: "array"
items:
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:

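The new `platform` query parameter on `/images/{name}/get` and `/images/get` takes a JSON-encoded OCI platform, as the descriptions above state. A minimal Go sketch of calling the endpoint directly over the daemon's Unix socket with only the standard library; the socket path, image name, and output file are illustrative:

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Route every request through the daemon's Unix socket.
	httpc := http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	q := url.Values{}
	q.Set("names", "alpine:latest")
	// JSON-encoded OCI platform, exactly as the parameter description specifies.
	q.Set("platform", `{"os":"linux","architecture":"arm","variant":"v5"}`)

	resp, err := httpc.Get("http://localhost/v1.49/images/get?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("alpine-armv5.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	n, err := io.Copy(out, resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println("saved", n, "bytes")
}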

@@ -153,6 +153,7 @@ type GetImageOpts struct {
// ImageInspectOpts holds parameters to inspect an image.
type ImageInspectOpts struct {
Manifests bool
Platform *ocispec.Platform
}
// CommitConfig is the configuration for creating an image as part of a build.


@@ -132,7 +132,8 @@ type InspectResponse struct {
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
// Only available if the daemon provides a multi-platform image store.
// Only available if the daemon provides a multi-platform image store, the client
// requests manifests AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.


@@ -106,6 +106,11 @@ type LoadOptions struct {
type InspectOptions struct {
// Manifests returns the image manifests.
Manifests bool
// Platform selects the specific platform of a multi-platform image to inspect.
//
// This option is only available for API version 1.49 and up.
Platform *ocispec.Platform
}
// SaveOptions holds parameters to save images.


@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package registry // import "github.com/docker/docker/api/types/registry"
import (
@@ -15,23 +18,26 @@ type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
// ExtraFields is for internal use to include deprecated fields on older API versions.
ExtraFields map[string]any `json:"-"`
}
// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
tmp := map[string]interface{}{
"InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
"IndexConfigs": sc.IndexConfigs,
"Mirrors": sc.Mirrors,
func (sc *ServiceConfig) MarshalJSON() ([]byte, error) {
type tmp ServiceConfig
base, err := json.Marshal((*tmp)(sc))
if err != nil {
return nil, err
}
if sc.AllowNondistributableArtifactsCIDRs != nil {
tmp["AllowNondistributableArtifactsCIDRs"] = nil
var merged map[string]any
_ = json.Unmarshal(base, &merged)
for k, v := range sc.ExtraFields {
merged[k] = v
}
if sc.AllowNondistributableArtifactsHostnames != nil {
tmp["AllowNondistributableArtifactsHostnames"] = nil
}
return json.Marshal(tmp)
return json.Marshal(merged)
}
// NetIPNet is the net.IPNet type, which can be marshalled and

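The rewritten `ServiceConfig.MarshalJSON` above uses a type-alias-and-merge pattern: the local alias type sheds the custom marshaler (so marshalling it cannot recurse), the result is decoded back into a map, and `ExtraFields` is overlaid on top. A standalone sketch of the same pattern; the `Config` type and field names here are illustrative, not the Docker API:

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Mirrors     []string       `json:"Mirrors"`
	ExtraFields map[string]any `json:"-"` // merged in by MarshalJSON, never serialized directly
}

func (c *Config) MarshalJSON() ([]byte, error) {
	type alias Config // the alias has no MarshalJSON method, so this cannot recurse
	base, err := json.Marshal((*alias)(c))
	if err != nil {
		return nil, err
	}
	var merged map[string]any
	if err := json.Unmarshal(base, &merged); err != nil {
		return nil, err
	}
	for k, v := range c.ExtraFields {
		merged[k] = v
	}
	return json.Marshal(merged)
}

func main() {
	c := &Config{
		Mirrors:     []string{"https://mirror.example.com"},
		ExtraFields: map[string]any{"AllowNondistributableArtifactsCIDRs": nil},
	}
	out, _ := json.Marshal(c)
	// {"AllowNondistributableArtifactsCIDRs":null,"Mirrors":["https://mirror.example.com"]}
	fmt.Println(string(out))
}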

@@ -73,6 +73,7 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
@@ -143,7 +144,7 @@ type Commit struct {
// Expected is the commit ID of external tool expected by dockerd as set at build time.
//
// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
Expected string
Expected string `json:",omitempty"`
}
// NetworkAddressPool is a temp struct used by [Info] struct.
@@ -151,3 +152,11 @@ type NetworkAddressPool struct {
Base string
Size int
}
// FirewallInfo describes the firewall backend.
type FirewallInfo struct {
// Driver is the name of the firewall backend driver.
Driver string `json:"Driver"`
// Info is a list of label/value pairs, containing information related to the firewall.
Info [][2]string `json:"Info,omitempty"`
}

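A minimal sketch of reading the new `FirewallBackend` field from a Go client, assuming a v28.1 client against a daemon that reports API v1.49; per the struct above, the field is nil on non-Linux daemons and on older API versions:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}

	if fw := info.FirewallBackend; fw != nil {
		fmt.Println("firewall driver:", fw.Driver)
		// Info is a list of label/value pairs, e.g. ["ReloadedAt", "2025-01-01T00:00:00Z"].
		for _, kv := range fw.Info {
			fmt.Printf("  %s = %s\n", kv[0], kv[1])
		}
	}
}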

@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
repository = reference.FamiliarName(ref)
repository = ref.Name()
}
query := url.Values{}

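This and the similar client changes below replace `reference.FamiliarName(ref)` with `ref.Name()`, i.e. the shortened Docker Hub form gives way to the fully-qualified repository name. A minimal sketch of the difference, using the `github.com/distribution/reference` package the client already depends on:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarName(ref)) // alpine
	fmt.Println(ref.Name())                  // docker.io/library/alpine
}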

@@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
query.Set("tag", getAPITagFromNamedRef(ref))
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))


@@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
query.Set("manifests", "1")
}
if opts.apiOptions.Platform != nil {
if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
return image.InspectResponse{}, err
}
platform, err := encodePlatform(opts.apiOptions.Platform)
if err != nil {
return image.InspectResponse{}, err
}
query.Set("platform", platform)
}
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {


@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/docker/docker/api/types/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageInspectOption is a type representing functional options for the image inspect operation.
@@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
})
}
// ImageInspectWithPlatform sets platform API option for the image inspect operation.
// This option is only available for API version 1.49 and up.
// With this option set, the image inspect operation will return information for the
// specified platform variant of the multi-platform image.
func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
clientOpts.apiOptions.Platform = platform
return nil
})
}
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {

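A minimal usage sketch for the new option, assuming a v28.1 client (the image name is illustrative); on a daemon older than API v1.49, the version gate in `ImageInspect` above returns an error:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Inspect the linux/arm64 variant of a multi-platform image.
	resp, err := cli.ImageInspect(context.Background(), "alpine:latest",
		client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}),
	)
	if err != nil {
		panic(err) // includes the version error when the daemon predates API 1.49
	}
	fmt.Println(resp.ID)
}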

@@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
if !options.All {
query.Set("tag", getAPITagFromNamedRef(ref))
}


@@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
return nil, errors.New("cannot push a digest reference")
}
name := reference.FamiliarName(ref)
query := url.Values{}
if !options.All {
ref = reference.TagNameOnly(ref)
@@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
query.Set("platform", string(pJson))
}
resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader)
}
if err != nil {
return nil, err


@@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
ref = reference.TagNameOnly(ref)
query := url.Values{}
query.Set("repo", reference.FamiliarName(ref))
query.Set("repo", ref.Name())
if tagged, ok := ref.(reference.Tagged); ok {
query.Set("tag", tagged.Tag())
}


@@ -237,7 +237,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
}
var daemonErr error
if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) {
if serverResp.Header.Get("Content-Type") == "application/json" {
var errorResponse types.ErrorResponse
if err := json.Unmarshal(body, &errorResponse); err != nil {
return errors.Wrap(err, "Error reading JSON")


@@ -0,0 +1,259 @@
// Package archive provides helper functions for dealing with archive files.
package archive
import (
"archive/tar"
"io"
"os"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
"github.com/moby/go-archive/compression"
"github.com/moby/go-archive/tarheader"
)
// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
// tar, but that do not have their own header entry.
//
// Deprecated: use [archive.ImpliedDirectoryMode] instead.
const ImpliedDirectoryMode = archive.ImpliedDirectoryMode
type (
// Compression is the state representing whether the archive is compressed.
//
// Deprecated: use [compression.Compression] instead.
Compression = compression.Compression
// WhiteoutFormat is the format of whiteouts unpacked
//
// Deprecated: use [archive.WhiteoutFormat] instead.
WhiteoutFormat = archive.WhiteoutFormat
// TarOptions wraps the tar options.
//
// Deprecated: use [archive.TarOptions] instead.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression compression.Compression
NoLchown bool
IDMap idtools.IdentityMapping
ChownOpts *idtools.Identity
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat archive.WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
// For each include when creating an archive, the included name will be
// replaced with the matching name from this map.
RebaseNames map[string]string
InUserNS bool
// Allow unpacking to succeed in spite of failures to set extended
// attributes on the unpacked files due to the destination filesystem
// not supporting them or a lack of permissions. Extended attributes
// were probably in the archive for a reason, so set this option at
// your own peril.
BestEffortXattrs bool
}
)
// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
//
// Deprecated: use [archive.Archiver] instead.
type Archiver struct {
Untar func(io.Reader, string, *TarOptions) error
IDMapping idtools.IdentityMapping
}
// NewDefaultArchiver returns a new Archiver without any IdentityMapping
//
// Deprecated: use [archive.NewDefaultArchiver] instead.
func NewDefaultArchiver() *Archiver {
return &Archiver{Untar: Untar}
}
const (
Uncompressed = compression.None // Deprecated: use [compression.None] instead.
Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead.
Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead.
Xz = compression.Xz // Deprecated: use [compression.Xz] instead.
Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead.
)
const (
AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead.
OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead.
)
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
//
// Deprecated: use [archive.IsArchivePath] instead.
func IsArchivePath(path string) bool {
return archive.IsArchivePath(path)
}
// DetectCompression detects the compression algorithm of the source.
//
// Deprecated: use [compression.Detect] instead.
func DetectCompression(source []byte) archive.Compression {
return compression.Detect(source)
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
//
// Deprecated: use [compression.DecompressStream] instead.
func DecompressStream(arch io.Reader) (io.ReadCloser, error) {
return compression.DecompressStream(arch)
}
// CompressStream compresses the dest with specified compression algorithm.
//
// Deprecated: use [compression.CompressStream] instead.
func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) {
return compression.CompressStream(dest, comp)
}
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper.
//
// Deprecated: use [archive.TarModifierFunc] instead.
type TarModifierFunc = archive.TarModifierFunc
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream.
//
// Deprecated: use [archive.ReplaceFileTarWrapper] instead.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser {
return archive.ReplaceFileTarWrapper(inputTarStream, mods)
}
// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
//
// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead.
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
return tarheader.FileInfoHeaderNoLookups(fi, link)
}
// FileInfoHeader creates a populated Header from fi.
//
// Deprecated: use [archive.FileInfoHeader] instead.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
return archive.FileInfoHeader(name, fi, link)
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
//
// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead.
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
return archive.ReadSecurityXattrToTarHeader(path, hdr)
}
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
//
// Deprecated: use [archive.Tar] instead.
func Tar(path string, compression archive.Compression) (io.ReadCloser, error) {
return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression})
}
// TarWithOptions creates an archive with the given options.
//
// Deprecated: use [archive.TarWithOptions] instead.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
return archive.TarWithOptions(srcPath, toArchiveOpt(options))
}
// Tarballer is a lower-level interface to TarWithOptions.
//
// Deprecated: use [archive.Tarballer] instead.
type Tarballer = archive.Tarballer
// NewTarballer constructs a new tarballer using TarWithOptions.
//
// Deprecated: use [archive.NewTarballer] instead.
func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) {
return archive.NewTarballer(srcPath, toArchiveOpt(options))
}
// Unpack unpacks the decompressedArchive to dest with options.
//
// Deprecated: use [archive.Unpack] instead.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options))
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
//
// Deprecated: use [archive.Untar] instead.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return archive.Untar(tarArchive, dest, toArchiveOpt(options))
}
// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
//
// Deprecated: use [archive.UntarUncompressed] instead.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options))
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, &TarOptions{
IDMap: archiver.IDMapping,
})
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).TarUntar(src, dst)
}
// UntarPath untar a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, &TarOptions{
IDMap: archiver.IDMapping,
})
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).UntarPath(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, nil)
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
return (&archive.Archiver{
Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
return archiver.Untar(reader, s, nil)
},
IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
}).CopyFileWithTar(src, dst)
}
// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
return archiver.IDMapping
}

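Everything in this shim forwards to the new `github.com/moby/go-archive` module, so consumers can drop the deprecated `github.com/docker/docker/pkg/archive` import and call the module directly. A minimal migration sketch (the source directory and output path are illustrative):

package main

import (
	"io"
	"os"

	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

func main() {
	// Previously: pkg/archive.TarWithOptions with the pkg/archive Gzip constant.
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression: compression.Gzip,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("/tmp/src.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}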

@@ -0,0 +1,56 @@
package archive
import (
"io"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
)
// ChangeType represents the change
//
// Deprecated: use [archive.ChangeType] instead.
type ChangeType = archive.ChangeType
const (
ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead.
ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead.
ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead.
)
// Change represents a change.
//
// Deprecated: use [archive.Change] instead.
type Change = archive.Change
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
//
// Deprecated: use [archive.Changes] instead.
func Changes(layers []string, rw string) ([]archive.Change, error) {
return archive.Changes(layers, rw)
}
// FileInfo describes the information of a file.
//
// Deprecated: use [archive.FileInfo] instead.
type FileInfo = archive.FileInfo
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
//
// Deprecated: use [archive.ChangesDirs] instead.
func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) {
return archive.ChangesDirs(newDir, oldDir)
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
//
// Deprecated: use [archive.ChangesSize] instead.
func ChangesSize(newDir string, changes []archive.Change) int64 {
return archive.ChangesSize(newDir, changes)
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap))
}


@@ -0,0 +1,130 @@
package archive
import (
"io"
"github.com/moby/go-archive"
"github.com/moby/go-archive/compression"
)
var (
ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead.
ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead.
ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead.
ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead.
)
// PreserveTrailingDotOrSeparator returns the given cleaned path.
//
// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath)
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename.
//
// Deprecated: use [archive.SplitPathDirEntry] instead.
func SplitPathDirEntry(path string) (dir, base string) {
return archive.SplitPathDirEntry(path)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive.
//
// Deprecated: use [archive.TarResource] instead.
func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) {
return archive.TarResource(sourceInfo)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
//
// Deprecated: use [archive.TarResourceRebase] instead.
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
return archive.TarResourceRebase(sourcePath, rebaseName)
}
// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions.
//
// Deprecated: use [archive.TarResourceRebaseOpts] instead.
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: compression.None,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source or destination path of a copy operation.
//
// Deprecated: use [archive.CopyInfo] instead.
type CopyInfo = archive.CopyInfo
// CopyInfoSourcePath stats the given path to create a CopyInfo struct.
// struct representing that resource for the source of an archive copy
// operation.
//
// Deprecated: use [archive.CopyInfoSourcePath] instead.
func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) {
return archive.CopyInfoSourcePath(path, followLink)
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation.
//
// Deprecated: use [archive.CopyInfoDestinationPath] instead.
func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) {
return archive.CopyInfoDestinationPath(path)
}
// PrepareArchiveCopy prepares the given srcContent archive.
//
// Deprecated: use [archive.PrepareArchiveCopy] instead.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) {
return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo)
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
//
// Deprecated: use [archive.RebaseArchiveEntries] instead.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
return archive.RebaseArchiveEntries(srcContent, oldBase, newBase)
}
// CopyResource performs an archive copy from the given source path to the
// given destination path.
//
// Deprecated: use [archive.CopyResource] instead.
func CopyResource(srcPath, dstPath string, followLink bool) error {
return archive.CopyResource(srcPath, dstPath, followLink)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
//
// Deprecated: use [archive.CopyTo] instead.
func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error {
return archive.CopyTo(content, srcInfo, dstPath)
}
// ResolveHostSourcePath decides the real path that needs to be copied.
//
// Deprecated: use [archive.ResolveHostSourcePath] instead.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
return archive.ResolveHostSourcePath(path, followLink)
}
// GetRebaseName normalizes and compares path and resolvedPath.
//
// Deprecated: use [archive.GetRebaseName] instead.
func GetRebaseName(path, resolvedPath string) (string, string) {
return archive.GetRebaseName(path, resolvedPath)
}


@@ -1,7 +0,0 @@
//go:build freebsd
package archive
import "golang.org/x/sys/unix"
var mknod = unix.Mknod


@@ -0,0 +1,37 @@
package archive
import (
"io"
"github.com/moby/go-archive"
)
// UnpackLayer unpacks `layer` to `dest`.
//
// Deprecated: use [archive.UnpackLayer] instead.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
return archive.UnpackLayer(dest, layer, toArchiveOpt(options))
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`.
//
// Deprecated: use [archive.ApplyLayer] instead.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return archive.ApplyLayer(dest, layer)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`.
//
// Deprecated: use [archive.ApplyUncompressedLayer] instead.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options))
}
// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
//
// Deprecated: use [archive.IsEmpty] instead.
func IsEmpty(rd io.Reader) (bool, error) {
return archive.IsEmpty(rd)
}


@@ -0,0 +1,10 @@
package archive
import "github.com/moby/go-archive"
// CheckSystemDriveAndRemoveDriveLetter verifies that a path is the system drive.
//
// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead.
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return archive.CheckSystemDriveAndRemoveDriveLetter(path)
}

vendor/github.com/docker/docker/pkg/archive/utils.go (generated, vendored; new file, 42 lines)

@@ -0,0 +1,42 @@
package archive
import (
"github.com/docker/docker/pkg/idtools"
"github.com/moby/go-archive"
)
// ToArchiveOpt converts a [TarOptions] to an [archive.TarOptions].
//
// Deprecated: use [archive.TarOptions] instead, this utility is for internal use to transition to the [github.com/moby/go-archive] module.
func ToArchiveOpt(options *TarOptions) *archive.TarOptions {
return toArchiveOpt(options)
}
func toArchiveOpt(options *TarOptions) *archive.TarOptions {
if options == nil {
return nil
}
var chownOpts *archive.ChownOpts
if options.ChownOpts != nil {
chownOpts = &archive.ChownOpts{
UID: options.ChownOpts.UID,
GID: options.ChownOpts.GID,
}
}
return &archive.TarOptions{
IncludeFiles: options.IncludeFiles,
ExcludePatterns: options.ExcludePatterns,
Compression: options.Compression,
NoLchown: options.NoLchown,
IDMap: idtools.ToUserIdentityMapping(options.IDMap),
ChownOpts: chownOpts,
IncludeSourceDir: options.IncludeSourceDir,
WhiteoutFormat: options.WhiteoutFormat,
NoOverwriteDirNonDir: options.NoOverwriteDirNonDir,
RebaseNames: options.RebaseNames,
InUserNS: options.InUserNS,
BestEffortXattrs: options.BestEffortXattrs,
}
}


@@ -0,0 +1,10 @@
package archive
import "github.com/moby/go-archive"
const (
WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead.
WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead.
WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead.
WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead.
)


@@ -0,0 +1,14 @@
package archive
import (
"io"
"github.com/moby/go-archive"
)
// Generate generates a new archive from the content provided as input.
//
// Deprecated: use [archive.Generate] instead.
func Generate(input ...string) (io.Reader, error) {
return archive.Generate(input...)
}


@@ -3,11 +3,15 @@ package idtools
import (
"fmt"
"os"
"github.com/moby/sys/user"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
//
// Deprecated: use [user.IDMap] instead.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
@@ -17,28 +21,42 @@ type IDMap struct {
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership and permissions.
//
// Deprecated: use [user.MkdirAllAndChown] instead.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, true)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership and permissions.
// Note that unlike os.Mkdir(), this function does not return an IsExist error
// in case path already exists.
//
// Deprecated: use [user.MkdirAndChown] instead.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, false, true)
return user.MkdirAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership or permissions will be performed
//
// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead.
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, false)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
//
// Deprecated: use [(user.IdentityMapping).RootPair] instead.
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return getRootUIDGID(uidMap, gidMap)
}
// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
@@ -101,11 +119,61 @@ type IdentityMapping struct {
GIDMaps []IDMap `json:"GIDMaps"`
}
// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping].
//
// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package.
func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping {
return IdentityMapping{
UIDMaps: fromUserIDMap(u.UIDMaps),
GIDMaps: fromUserIDMap(u.GIDMaps),
}
}
func fromUserIDMap(u []user.IDMap) []IDMap {
if u == nil {
return nil
}
m := make([]IDMap, len(u))
for i := range u {
m[i] = IDMap{
ContainerID: int(u[i].ID),
HostID: int(u[i].ParentID),
Size: int(u[i].Count),
}
}
return m
}
// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping].
//
// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package.
func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping {
return user.IdentityMapping{
UIDMaps: toUserIDMap(u.UIDMaps),
GIDMaps: toUserIDMap(u.GIDMaps),
}
}
func toUserIDMap(u []IDMap) []user.IDMap {
if u == nil {
return nil
}
m := make([]user.IDMap, len(u))
for i := range u {
m[i] = user.IDMap{
ID: int64(u[i].ContainerID),
ParentID: int64(u[i].HostID),
Count: int64(u[i].Size),
}
}
return m
}
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i IdentityMapping) RootPair() Identity {
uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps)
uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
return Identity{UID: uid, GID: gid}
}
@@ -144,6 +212,8 @@ func (i IdentityMapping) Empty() bool {
}
// CurrentIdentity returns the identity of the current process
//
// Deprecated: use [os.Getuid] and [os.Getegid] instead.
func CurrentIdentity() Identity {
return Identity{UID: os.Getuid(), GID: os.Getegid()}
}

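The deprecated `idtools` helpers above now forward to `github.com/moby/sys/user`, so callers can invoke that package directly. A minimal migration sketch mirroring the two forwarding calls shown in the diff (paths and IDs are illustrative):

package main

import "github.com/moby/sys/user"

func main() {
	// Replaces idtools.MkdirAllAndChown: create the full path, chowning every directory to 1000:1000.
	if err := user.MkdirAllAndChown("/tmp/example/data", 0o755, 1000, 1000); err != nil {
		panic(err)
	}

	// Replaces idtools.MkdirAllAndChownNew: only chown directories that did not already exist.
	if err := user.MkdirAllAndChown("/tmp/example/cache", 0o755, 1000, 1000, user.WithOnlyNew); err != nil {
		panic(err)
	}
}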

@@ -1,166 +0,0 @@
//go:build !windows
package idtools
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"github.com/moby/sys/user"
)
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
path, err := filepath.Abs(path)
if err != nil {
return err
}
stat, err := os.Stat(path)
if err == nil {
if !stat.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if !chownExisting {
return nil
}
// short-circuit -- we were called with an existing directory and chown was requested
return setPermissions(path, mode, owner, stat)
}
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if os.IsNotExist(err) {
paths = []string{path}
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err = os.MkdirAll(path, mode); err != nil {
return err
}
} else if err = os.Mkdir(path, mode); err != nil {
return err
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err = setPermissions(pathComponent, mode, owner, nil); err != nil {
return err
}
}
return nil
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username
//
// Deprecated: use [user.LookupUser] instead
func LookupUser(name string) (user.User, error) {
return user.LookupUser(name)
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid
//
// Deprecated: use [user.LookupUid] instead
func LookupUID(uid int) (user.User, error) {
return user.LookupUid(uid)
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
//
// Deprecated: use [user.LookupGroup] instead
func LookupGroup(name string) (user.Group, error) {
return user.LookupGroup(name)
}
// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
// Likewise for setting permissions.
func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error {
if stat == nil {
var err error
stat, err = os.Stat(p)
if err != nil {
return err
}
}
if stat.Mode().Perm() != mode.Perm() {
if err := os.Chmod(p, mode.Perm()); err != nil {
return err
}
}
ssi := stat.Sys().(*syscall.Stat_t)
if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) {
return nil
}
return os.Chown(p, owner.UID, owner.GID)
}
// LoadIdentityMapping takes a requested username and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func LoadIdentityMapping(name string) (IdentityMapping, error) {
// TODO: Consider adding support for calling out to "getent"
usr, err := user.LookupUser(name)
if err != nil {
return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err)
}
subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
if err != nil {
return IdentityMapping{}, err
}
subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
if err != nil {
return IdentityMapping{}, err
}
return IdentityMapping{
UIDMaps: subuidRanges,
GIDMaps: subgidRanges,
}, nil
}
func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) {
uidstr := strconv.Itoa(usr.Uid)
rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool {
return sid.Name == usr.Name || sid.Name == uidstr
})
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
}
idMap := []IDMap{}
containerID := 0
for _, idrange := range rangeList {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: int(idrange.SubID),
Size: int(idrange.Count),
})
containerID = containerID + int(idrange.Count)
}
return idMap, nil
}


@@ -1,9 +1,5 @@
package idtools
import (
"os"
)
const (
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
@@ -14,11 +10,3 @@ const (
ContainerUserSidString = "S-1-5-93-2-2"
)
// This is currently a wrapper around [os.MkdirAll] since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be supported here
// too.
func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error {
return os.MkdirAll(path, 0)
}

vendor/github.com/moby/go-archive/.gitattributes (generated, vendored; new file, 2 lines)

@@ -0,0 +1,2 @@
*.go -text diff=golang
*.go text eol=lf

vendor/github.com/moby/go-archive/.gitignore (generated, vendored; new file, 1 line)

@@ -0,0 +1 @@
/coverage.txt

vendor/github.com/moby/go-archive/.golangci.yml (generated, vendored; new file, 33 lines)

@@ -0,0 +1,33 @@
version: "2"
issues:
# Disable maximum issues count per one linter.
max-issues-per-linter: 0
# Disable maximum count of issues with the same text.
max-same-issues: 0
linters:
enable:
- errorlint
- unconvert
- unparam
exclusions:
generated: disable
presets:
- comments
- std-error-handling
settings:
staticcheck:
# Enable all options, with some exceptions.
# For defaults, see https://golangci-lint.run/usage/linters/#staticcheck
checks:
- all
- -QF1008 # Omit embedded fields from selector expression; https://staticcheck.dev/docs/checks/#QF1008
- -ST1003 # Poorly chosen identifier; https://staticcheck.dev/docs/checks/#ST1003
formatters:
enable:
- gofumpt
- goimports
exclusions:
generated: disable

vendor/github.com/moby/go-archive/LICENSE (generated, vendored; new file, 202 lines)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -3,32 +3,24 @@ package archive
import (
"archive/tar"
"bufio"
"bytes"
"compress/bzip2"
"compress/gzip"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/containerd/log"
"github.com/docker/docker/pkg/idtools"
"github.com/klauspost/compress/zstd"
"github.com/moby/patternmatcher"
"github.com/moby/sys/sequential"
"github.com/moby/sys/user"
"github.com/moby/go-archive/compression"
"github.com/moby/go-archive/tarheader"
)
// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
@ -45,18 +37,25 @@ const ImpliedDirectoryMode = 0o755
type (
// Compression indicates whether, and with which algorithm, a stream is compressed.
Compression int
//
// Deprecated: use [compression.Compression].
Compression = compression.Compression
// WhiteoutFormat is the format of whiteouts unpacked
WhiteoutFormat int
ChownOpts struct {
UID int
GID int
}
// TarOptions wraps the tar options.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
Compression compression.Compression
NoLchown bool
IDMap idtools.IdentityMapping
ChownOpts *idtools.Identity
IDMap user.IdentityMapping
ChownOpts *ChownOpts
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
@ -83,7 +82,7 @@ type (
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
Untar func(io.Reader, string, *TarOptions) error
IDMapping idtools.IdentityMapping
IDMapping user.IdentityMapping
}
// NewDefaultArchiver returns a new Archiver without any IdentityMapping
@ -97,11 +96,11 @@ func NewDefaultArchiver() *Archiver {
type breakoutError error
const (
Uncompressed Compression = 0 // Uncompressed represents the uncompressed.
Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm.
Gzip Compression = 2 // Gzip is gzip compression algorithm.
Xz Compression = 3 // Xz is xz compression algorithm.
Zstd Compression = 4 // Zstd is zstd compression algorithm.
Uncompressed = compression.None // Deprecated: use [compression.None].
Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2].
Gzip = compression.Gzip // Deprecated: use [compression.Gzip].
Xz = compression.Xz // Deprecated: use [compression.Xz].
Zstd = compression.Zstd // Deprecated: use [compression.Zstd].
)
const (
@ -117,7 +116,7 @@ func IsArchivePath(path string) bool {
return false
}
defer file.Close()
rdr, err := DecompressStream(file)
rdr, err := compression.DecompressStream(file)
if err != nil {
return false
}
@ -127,242 +126,25 @@ func IsArchivePath(path string) bool {
return err == nil
}
const (
zstdMagicSkippableStart = 0x184D2A50
zstdMagicSkippableMask = 0xFFFFFFF0
)
var (
bzip2Magic = []byte{0x42, 0x5A, 0x68}
gzipMagic = []byte{0x1F, 0x8B, 0x08}
xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
)
type matcher = func([]byte) bool
func magicNumberMatcher(m []byte) matcher {
return func(source []byte) bool {
return bytes.HasPrefix(source, m)
}
}
// zstdMatcher detects zstd compression algorithm.
// Zstandard compressed data is made of one or more frames.
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
func zstdMatcher() matcher {
return func(source []byte) bool {
if bytes.HasPrefix(source, zstdMagic) {
// Zstandard frame
return true
}
// skippable frame
if len(source) < 8 {
return false
}
// magic number from 0x184D2A50 to 0x184D2A5F.
if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
return true
}
return false
}
}
// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
compressionMap := map[Compression]matcher{
Bzip2: magicNumberMatcher(bzip2Magic),
Gzip: magicNumberMatcher(gzipMagic),
Xz: magicNumberMatcher(xzMagic),
Zstd: zstdMatcher(),
}
for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
fn := compressionMap[compression]
if fn(source) {
return compression
}
}
return Uncompressed
}
func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
args := []string{"xz", "-d", "-c", "-q"}
return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
}
func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
noPigz, err := strconv.ParseBool(noPigzEnv)
if err != nil {
log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
}
if noPigz {
log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
return gzip.NewReader(buf)
}
}
unpigzPath, err := exec.LookPath("unpigz")
if err != nil {
log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library")
return gzip.NewReader(buf)
}
log.G(ctx).Debugf("Using %s to decompress", unpigzPath)
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}
type readCloserWrapper struct {
io.Reader
closer func() error
closed atomic.Bool
}
func (r *readCloserWrapper) Close() error {
if !r.closed.CompareAndSwap(false, true) {
log.G(context.TODO()).Error("subsequent attempt to close readCloserWrapper")
if log.GetLevel() >= log.DebugLevel {
log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
}
return nil
}
if r.closer != nil {
return r.closer()
}
return nil
}
var bufioReader32KPool = &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
}
type bufferedReader struct {
buf *bufio.Reader
}
func newBufferedReader(r io.Reader) *bufferedReader {
buf := bufioReader32KPool.Get().(*bufio.Reader)
buf.Reset(r)
return &bufferedReader{buf}
}
func (r *bufferedReader) Read(p []byte) (int, error) {
if r.buf == nil {
return 0, io.EOF
}
n, err := r.buf.Read(p)
if err == io.EOF {
r.buf.Reset(nil)
bufioReader32KPool.Put(r.buf)
r.buf = nil
}
return n, err
}
func (r *bufferedReader) Peek(n int) ([]byte, error) {
if r.buf == nil {
return nil, io.EOF
}
return r.buf.Peek(n)
//
// Deprecated: use [compression.Detect].
func DetectCompression(source []byte) compression.Compression {
return compression.Detect(source)
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
//
// Deprecated: use [compression.DecompressStream].
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
buf := newBufferedReader(archive)
bs, err := buf.Peek(10)
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
// that results in an io.EOF from the Peek() call. So, in those
// cases we'll just treat it as a non-compressed stream and
// that means just create an empty layer.
// See Issue 18170
return nil, err
}
compression := DetectCompression(bs)
switch compression {
case Uncompressed:
return &readCloserWrapper{
Reader: buf,
}, nil
case Gzip:
ctx, cancel := context.WithCancel(context.Background())
gzReader, err := gzDecompress(ctx, buf)
if err != nil {
cancel()
return nil, err
}
return &readCloserWrapper{
Reader: gzReader,
closer: func() error {
cancel()
return gzReader.Close()
},
}, nil
case Bzip2:
bz2Reader := bzip2.NewReader(buf)
return &readCloserWrapper{
Reader: bz2Reader,
}, nil
case Xz:
ctx, cancel := context.WithCancel(context.Background())
xzReader, err := xzDecompress(ctx, buf)
if err != nil {
cancel()
return nil, err
}
return &readCloserWrapper{
Reader: xzReader,
closer: func() error {
cancel()
return xzReader.Close()
},
}, nil
case Zstd:
zstdReader, err := zstd.NewReader(buf)
if err != nil {
return nil, err
}
return &readCloserWrapper{
Reader: zstdReader,
closer: func() error {
zstdReader.Close()
return nil
},
}, nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
return compression.DecompressStream(archive)
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
switch compression {
case Uncompressed:
return nopWriteCloser{dest}, nil
case Gzip:
return gzip.NewWriter(dest), nil
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
//
// Deprecated: use [compression.CompressStream].
func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) {
return compression.CompressStream(dest, comp)
}
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
@ -411,7 +193,7 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
var originalHeader *tar.Header
for {
originalHeader, err = tarReader.Next()
if err == io.EOF {
if errors.Is(err, io.EOF) {
break
}
if err != nil {
@ -453,90 +235,11 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
return pipeReader
}
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
case Zstd:
return "tar.zst"
}
return ""
}
// assert that we implement [tar.FileInfoNames].
//
// TODO(thaJeztah): disabled to allow compiling on < go1.23. un-comment once we drop support for older versions of go.
// var _ tar.FileInfoNames = (*nosysFileInfo)(nil)
// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
//
// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader]
// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102
type nosysFileInfo struct {
os.FileInfo
}
// Uname stubs out looking up username. It implements [tar.FileInfoNames]
// to prevent [tar.FileInfoHeader] from loading libraries to perform
// username lookups.
func (fi nosysFileInfo) Uname() (string, error) {
return "", nil
}
// Gname stubs out looking up group-name. It implements [tar.FileInfoNames]
// to prevent [tar.FileInfoHeader] from loading libraries to perform
// username lookups.
func (fi nosysFileInfo) Gname() (string, error) {
return "", nil
}
func (fi nosysFileInfo) Sys() interface{} {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
return sys
}
return nil
}
// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, hdr *tar.Header) error
// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
//
// Compared to the archive/tar.FileInfoHeader function, this function is safe to
// call from a chrooted process as it does not populate fields which would
// require operating system lookups. It behaves identically to
// tar.FileInfoHeader when fi is a FileInfo value returned from
// tar.Header.FileInfo().
//
// When fi is a FileInfo for a native file, such as returned from os.Stat() and
// os.Lstat(), the returned Header value differs from one returned from
// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
// set as OS lookups would be required to populate them. The AccessTime and
// ChangeTime fields are not currently set (not yet implemented) although that
// is subject to change. Callers which require the AccessTime or ChangeTime
// fields to be zeroed should explicitly zero them out in the returned Header
// value to avoid any compatibility issues in the future.
// Deprecated: use [tarheader.FileInfoHeaderNoLookups].
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
if err != nil {
return nil, err
}
if sysStat != nil {
return hdr, sysStat(fi, hdr)
}
return hdr, nil
return tarheader.FileInfoHeaderNoLookups(fi, link)
}
// FileInfoHeader creates a populated Header from fi.
@ -547,7 +250,7 @@ func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
// precision, and the Uname and Gname fields are only set when fi is a FileInfo
// value returned from tar.Header.FileInfo().
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := FileInfoHeaderNoLookups(fi, link)
hdr, err := tarheader.FileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}
@ -598,8 +301,8 @@ type tarAppender struct {
// for hardlink mapping
SeenFiles map[uint64]string
IdentityMapping idtools.IdentityMapping
ChownOpts *idtools.Identity
IdentityMapping user.IdentityMapping
ChownOpts *ChownOpts
// For packing and unpacking whiteout files in the
// non standard format. The whiteout files defined
@ -608,7 +311,7 @@ type tarAppender struct {
WhiteoutConverter tarWhiteoutConverter
}
func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
func newTarAppender(idMapping user.IdentityMapping, writer io.Writer, chownOpts *ChownOpts) *tarAppender {
return &tarAppender{
SeenFiles: make(map[uint64]string),
TarWriter: tar.NewWriter(writer),
@ -679,11 +382,11 @@ func (ta *tarAppender) addTarFile(path, name string) error {
// writing tar headers/files. We skip whiteout files because they were written
// by the kernel and already have proper ownership relative to the host
if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
fileIDPair, err := getFileUIDGID(fi.Sys())
uid, gid, err := getFileUIDGID(fi.Sys())
if err != nil {
return err
}
hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(uid, gid)
if err != nil {
return err
}
@ -743,7 +446,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o
var (
Lchown = true
inUserns, bestEffortXattrs bool
chownOpts *idtools.Identity
chownOpts *ChownOpts
)
// TODO(thaJeztah): make opts a required argument.
@ -763,7 +466,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
@ -839,7 +542,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o
// Lchown is not supported on Windows.
if Lchown && runtime.GOOS != "windows" {
if chownOpts == nil {
chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
chownOpts = &ChownOpts{UID: hdr.Uid, GID: hdr.Gid}
}
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
var msg string
@ -903,8 +606,8 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{Compression: compression})
func Tar(path string, comp compression.Compression) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{Compression: comp})
}
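
Tar now accepts a compression.Compression value directly instead of the package-local Compression type. A minimal sketch of a caller under that assumption, using only the import paths recorded in vendor/modules.txt (the directory and output paths are hypothetical):

package main

import (
	"io"
	"os"

	archive "github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

func main() {
	// Pack a directory into a gzip-compressed tar stream.
	rc, err := archive.Tar("/tmp/some-dir", compression.Gzip)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("/tmp/some-dir.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}
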
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
@ -940,7 +643,7 @@ func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) {
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
compressWriter, err := compression.CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
@ -1026,7 +729,8 @@ func (t *Tarballer) Do() {
)
walkRoot := getWalkRoot(t.srcPath, include)
filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error {
// TODO(thaJeztah): should this error be handled?
_ = filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error {
if err != nil {
log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err)
return nil
@ -1130,7 +834,7 @@ func (t *Tarballer) Do() {
if err := ta.addTarFile(filePath, relFilePath); err != nil {
log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
if errors.Is(err, io.ErrClosedPipe) {
return err
}
}
@ -1150,7 +854,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
loop:
for {
hdr, err := tr.Next()
if err == io.EOF {
if errors.Is(err, io.EOF) {
// end of tar archive
break
}
@ -1212,7 +916,7 @@ loop:
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return err
}
@ -1272,9 +976,9 @@ func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions)
// RootPair() is confined inside this loop as most cases will not require a call, so we can spend some
// unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
// usage that reduces the portability of an image.
rootIDs := options.IDMap.RootPair()
uid, gid := options.IDMap.RootPair()
err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
err = user.MkdirAllAndChown(parentPath, ImpliedDirectoryMode, uid, gid, user.WithOnlyNew)
if err != nil {
return err
}
@ -1304,7 +1008,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
return errors.New("empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
@ -1316,7 +1020,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
r := tarArchive
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
decompressedArchive, err := compression.DecompressStream(tarArchive)
if err != nil {
return err
}
@ -1330,15 +1034,14 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
archive, err := Tar(src, compression.None)
if err != nil {
return err
}
defer archive.Close()
options := &TarOptions{
return archiver.Untar(archive, dst, &TarOptions{
IDMap: archiver.IDMapping,
}
return archiver.Untar(archive, dst, options)
})
}
// UntarPath untars a file from path to a destination; src is the source tar file path.
@ -1348,10 +1051,9 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
return err
}
defer archive.Close()
options := &TarOptions{
return archiver.Untar(archive, dst, &TarOptions{
IDMap: archiver.IDMapping,
}
return archiver.Untar(archive, dst, options)
})
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
@ -1370,9 +1072,9 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
// if this Archiver is set up with ID mapping we need to create
// the new destination directory with the remapped root UID/GID pair
// as owner
rootIDs := archiver.IDMapping.RootPair()
uid, gid := archiver.IDMapping.RootPair()
// Create dst, copy src's content into it
if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
if err := user.MkdirAllAndChown(dst, 0o755, uid, gid, user.WithOnlyNew); err != nil {
return err
}
return archiver.TarUntar(src, dst)
@ -1388,7 +1090,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
if srcSt.IsDir() {
return fmt.Errorf("Can't copy a directory")
return errors.New("can't copy a directory")
}
// Clean up the trailing slash. This must be done in an operating
@ -1416,7 +1118,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
defer srcF.Close()
hdr, err := FileInfoHeaderNoLookups(srcSt, "")
hdr, err := tarheader.FileInfoHeaderNoLookups(srcSt, "")
if err != nil {
return err
}
@ -1456,52 +1158,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
func (archiver *Archiver) IdentityMapping() user.IdentityMapping {
return archiver.IDMapping
}
func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error {
ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
hdr.Uid, hdr.Gid = ids.UID, ids.GID
func remapIDs(idMapping user.IdentityMapping, hdr *tar.Header) error {
uid, gid, err := idMapping.ToHost(hdr.Uid, hdr.Gid)
hdr.Uid, hdr.Gid = uid, gid
return err
}
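
The identity-mapping rework is the other substantive change in this file: idtools.Identity values give way to plain (uid, gid) pairs on user.IdentityMapping. A hedged sketch, with signatures inferred only from the call sites visible in this diff (an empty mapping passes IDs through unchanged; the target path is hypothetical and chowning it may require privileges):

package main

import (
	"fmt"

	"github.com/moby/sys/user"
)

func main() {
	var idMap user.IdentityMapping // empty mapping: IDs pass through

	// RootPair now returns two ints instead of an idtools.Identity.
	uid, gid := idMap.RootPair()
	fmt.Println("root maps to:", uid, gid)

	// ToHost likewise takes and returns plain uid/gid pairs.
	hostUID, hostGID, err := idMap.ToHost(1000, 1000)
	if err != nil {
		panic(err)
	}
	fmt.Println("container 1000:1000 ->", hostUID, hostGID)

	// Create a directory owned by the remapped root, creating only
	// missing path elements, mirroring the MkdirAllAndChown call above.
	if err := user.MkdirAllAndChown("/tmp/remapped-root", 0o755, uid, gid, user.WithOnlyNew); err != nil {
		panic(err)
	}
}
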
// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
cmd.Stdin = input
pipeR, pipeW := io.Pipe()
cmd.Stdout = pipeW
var errBuf bytes.Buffer
cmd.Stderr = &errBuf
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
// Ensure the command has exited before we clean anything up
done := make(chan struct{})
// Copy stdout to the returned pipe
go func() {
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
} else {
pipeW.Close()
}
close(done)
}()
return &readCloserWrapper{
Reader: pipeR,
closer: func() error {
// Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
// cmd.Wait waits for any non-file stdout/stderr/stdin to close.
err := pipeR.Close()
<-done
return err
},
}, nil
}
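
Most of the code deleted above did not disappear; it moved wholesale into github.com/moby/go-archive/compression, with the old archive-package symbols retained as deprecated wrappers. A minimal sketch of a caller switching to the new package directly (the input file name is hypothetical):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/moby/go-archive/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Previously archive.DecompressStream; now the compression
	// package detects the format and decompresses transparently.
	rdr, err := compression.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	n, err := io.Copy(io.Discard, rdr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes\n", n)
}
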

View File

@ -7,18 +7,12 @@ import (
"errors"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"github.com/docker/docker/pkg/idtools"
"golang.org/x/sys/unix"
)
func init() {
sysStat = statUnix
}
// addLongPathPrefix adds the Windows long path prefix to the path provided if
// it does not already have it. It is a no-op on platforms other than Windows.
func addLongPathPrefix(srcPath string) string {
@ -39,40 +33,6 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
// Devmajor and Devminor are only needed for special devices.
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
// ZFS in particular does not override the default:
// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
// Such large values cannot be encoded in a tar header.
if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
return nil
}
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
return nil
}
func getInodeFromStat(stat interface{}) (uint64, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
@ -82,13 +42,13 @@ func getInodeFromStat(stat interface{}) (uint64, error) {
return s.Ino, nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
return 0, 0, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
return int(s.Uid), int(s.Gid), nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by

View File

@ -5,8 +5,6 @@ import (
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/idtools"
)
// longPathPrefix is the longpath prefix for Windows file paths.
@ -43,11 +41,6 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm | 0o111
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (uint64, error) {
// do nothing. no notion of Inode in stat on Windows
return 0, nil
@ -63,7 +56,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
func getFileUIDGID(stat interface{}) (int, int, error) {
// no notion of file ownership mapping yet on Windows
return idtools.Identity{UID: 0, GID: 0}, nil
return 0, 0, nil
}

View File

@ -14,7 +14,7 @@ import (
"time"
"github.com/containerd/log"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/sys/user"
)
// ChangeType represents the change type.
@ -75,7 +75,7 @@ func sameFsTime(a, b time.Time) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
return collectChanges(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
@ -103,7 +103,7 @@ type (
deleteChange func(string, string, os.FileInfo) (string, error)
)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
func collectChanges(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@ -383,7 +383,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
func ExportChanges(dir string, changes []Change, idMap user.IdentityMapping) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idMap, writer, nil)
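
A hedged sketch of the Changes/ExportChanges pair this hunk touches, assuming the github.com/moby/go-archive import path from vendor/modules.txt and the Change{Path, Kind} shape of the upstream docker archive package; the layer paths are hypothetical:

package main

import (
	"fmt"
	"io"

	archive "github.com/moby/go-archive"
	"github.com/moby/sys/user"
)

func main() {
	// Diff the writable layer against its parent layer(s).
	changes, err := archive.Changes([]string{"/var/lib/layers/parent"}, "/var/lib/layers/rw")
	if err != nil {
		panic(err)
	}
	for _, c := range changes {
		fmt.Println(c.Kind, c.Path)
	}

	// Pack the changes into a tar stream; ExportChanges now takes a
	// user.IdentityMapping instead of an idtools.IdentityMapping.
	var idMap user.IdentityMapping
	rc, err := archive.ExportChanges("/var/lib/layers/rw", changes, idMap)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(io.Discard, rc)
}
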

View File

@ -132,14 +132,7 @@ func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
for ix1 < len(names1) && ix2 < len(names2) {
ni1 := names1[ix1]
ni2 := names2[ix2]

View File

@ -0,0 +1,263 @@
package compression
import (
"bufio"
"bytes"
"compress/bzip2"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"sync"
"github.com/containerd/log"
"github.com/klauspost/compress/zstd"
)
// Compression indicates whether, and with which algorithm, a stream is compressed.
type Compression int
const (
None Compression = 0 // None represents an uncompressed stream.
Bzip2 Compression = 1 // Bzip2 is the bzip2 compression algorithm.
Gzip Compression = 2 // Gzip is the gzip compression algorithm.
Xz Compression = 3 // Xz is the xz compression algorithm.
Zstd Compression = 4 // Zstd is the zstd compression algorithm.
)
// Extension returns the extension of a file that uses the specified compression algorithm.
func (c *Compression) Extension() string {
switch *c {
case None:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
case Zstd:
return "tar.zst"
default:
return ""
}
}
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
if r.closer != nil {
return r.closer()
}
return nil
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
var bufioReader32KPool = &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
}
type bufferedReader struct {
buf *bufio.Reader
}
func newBufferedReader(r io.Reader) *bufferedReader {
buf := bufioReader32KPool.Get().(*bufio.Reader)
buf.Reset(r)
return &bufferedReader{buf}
}
func (r *bufferedReader) Read(p []byte) (int, error) {
if r.buf == nil {
return 0, io.EOF
}
n, err := r.buf.Read(p)
if errors.Is(err, io.EOF) {
r.buf.Reset(nil)
bufioReader32KPool.Put(r.buf)
r.buf = nil
}
return n, err
}
func (r *bufferedReader) Peek(n int) ([]byte, error) {
if r.buf == nil {
return nil, io.EOF
}
return r.buf.Peek(n)
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
buf := newBufferedReader(archive)
bs, err := buf.Peek(10)
if err != nil && !errors.Is(err, io.EOF) {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
// that results in an io.EOF from the Peek() call. So, in those
// cases we'll just treat it as a non-compressed stream and
// that means just create an empty layer.
// See Issue 18170
return nil, err
}
switch compression := Detect(bs); compression {
case None:
return &readCloserWrapper{
Reader: buf,
}, nil
case Gzip:
ctx, cancel := context.WithCancel(context.Background())
gzReader, err := gzipDecompress(ctx, buf)
if err != nil {
cancel()
return nil, err
}
return &readCloserWrapper{
Reader: gzReader,
closer: func() error {
cancel()
return gzReader.Close()
},
}, nil
case Bzip2:
bz2Reader := bzip2.NewReader(buf)
return &readCloserWrapper{
Reader: bz2Reader,
}, nil
case Xz:
ctx, cancel := context.WithCancel(context.Background())
xzReader, err := xzDecompress(ctx, buf)
if err != nil {
cancel()
return nil, err
}
return &readCloserWrapper{
Reader: xzReader,
closer: func() error {
cancel()
return xzReader.Close()
},
}, nil
case Zstd:
zstdReader, err := zstd.NewReader(buf)
if err != nil {
return nil, err
}
return &readCloserWrapper{
Reader: zstdReader,
closer: func() error {
zstdReader.Close()
return nil
},
}, nil
default:
return nil, fmt.Errorf("unsupported compression format (%d)", compression)
}
}
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
switch compression {
case None:
return nopWriteCloser{dest}, nil
case Gzip:
return gzip.NewWriter(dest), nil
case Bzip2:
// archive/bzip2 does not support writing.
return nil, errors.New("unsupported compression format: tar.bz2")
case Xz:
// There is no xz write support at all. However, this is not a problem,
// as docker currently only generates gzipped tars.
return nil, errors.New("unsupported compression format: tar.xz")
default:
return nil, fmt.Errorf("unsupported compression format (%d)", compression)
}
}
func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
args := []string{"xz", "-d", "-c", "-q"}
return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
}
func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
noPigz, err := strconv.ParseBool(noPigzEnv)
if err != nil {
log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
}
if noPigz {
log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
return gzip.NewReader(buf)
}
}
unpigzPath, err := exec.LookPath("unpigz")
if err != nil {
log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library")
return gzip.NewReader(buf)
}
log.G(ctx).Debugf("Using %s to decompress", unpigzPath)
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}
// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
reader, writer := io.Pipe()
cmd.Stdin = in
cmd.Stdout = writer
var errBuf bytes.Buffer
cmd.Stderr = &errBuf
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
// Ensure the command has exited before we clean anything up
done := make(chan struct{})
// Copy stdout to the returned pipe
go func() {
if err := cmd.Wait(); err != nil {
_ = writer.CloseWithError(fmt.Errorf("%w: %s", err, errBuf.String()))
} else {
_ = writer.Close()
}
close(done)
}()
return &readCloserWrapper{
Reader: reader,
closer: func() error {
// Close reader, and then wait for the command to complete before returning. We have to close reader first, as
// cmd.Wait waits for any non-file stdout/stderr/stdin to close.
err := reader.Close()
<-done
return err
},
}, nil
}
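
A small round trip exercising the package as added here: compress with gzip, detect the format from the magic bytes, and decompress. Only symbols defined in this file and in the companion detect.go below are assumed:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/moby/go-archive/compression"
)

func main() {
	var buf bytes.Buffer

	w, err := compression.CompressStream(&buf, compression.Gzip)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello archive")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush the gzip footer
		panic(err)
	}

	// Detect reports Gzip from the magic bytes written above.
	c := compression.Detect(buf.Bytes())
	fmt.Println("detected extension:", c.Extension()) // tar.gz

	r, err := compression.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped: %q\n", out)
}
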

View File

@ -0,0 +1,65 @@
package compression
import (
"bytes"
"encoding/binary"
)
const (
zstdMagicSkippableStart = 0x184D2A50
zstdMagicSkippableMask = 0xFFFFFFF0
)
var (
bzip2Magic = []byte{0x42, 0x5A, 0x68}
gzipMagic = []byte{0x1F, 0x8B, 0x08}
xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
)
type matcher = func([]byte) bool
// Detect detects the compression algorithm of the source.
func Detect(source []byte) Compression {
compressionMap := map[Compression]matcher{
Bzip2: magicNumberMatcher(bzip2Magic),
Gzip: magicNumberMatcher(gzipMagic),
Xz: magicNumberMatcher(xzMagic),
Zstd: zstdMatcher(),
}
for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
fn := compressionMap[compression]
if fn(source) {
return compression
}
}
return None
}
func magicNumberMatcher(m []byte) matcher {
return func(source []byte) bool {
return bytes.HasPrefix(source, m)
}
}
// zstdMatcher detects zstd compression algorithm.
// Zstandard compressed data is made of one or more frames.
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
func zstdMatcher() matcher {
return func(source []byte) bool {
if bytes.HasPrefix(source, zstdMagic) {
// Zstandard frame
return true
}
// skippable frame
if len(source) < 8 {
return false
}
// magic number from 0x184D2A50 to 0x184D2A5F.
if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
return true
}
return false
}
}
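
The skippable-frame branch above is the non-obvious case: any frame whose first four little-endian bytes fall in 0x184D2A50..0x184D2A5F is still reported as Zstd. A tiny sketch of that behavior, assuming only the Detect function from this file:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/moby/go-archive/compression"
)

func main() {
	// An 8-byte skippable-frame header: a magic number inside the
	// skippable range plus a (zero) frame-size field.
	hdr := make([]byte, 8)
	binary.LittleEndian.PutUint32(hdr[:4], 0x184D2A53)

	fmt.Println(compression.Detect(hdr) == compression.Zstd) // true
}
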

View File

@ -128,7 +128,6 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
@ -335,7 +334,7 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read
for {
hdr, err := srcTar.Next()
if err == io.EOF {
if errors.Is(err, io.EOF) {
// Signals end of archive.
rebasedTar.Close()
w.Close()

9
vendor/github.com/moby/go-archive/dev_freebsd.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
//go:build freebsd
package archive
import "golang.org/x/sys/unix"
func mknod(path string, mode uint32, dev uint64) error {
return unix.Mknod(path, mode, dev)
}

View File

@ -3,6 +3,7 @@ package archive
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"os"
@ -11,6 +12,8 @@ import (
"strings"
"github.com/containerd/log"
"github.com/moby/go-archive/compression"
)
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
@ -35,7 +38,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
if errors.Is(err, io.EOF) {
// end of tar archive
break
}
@ -149,7 +152,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
@ -165,7 +168,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
return 0, errors.New("invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
@ -221,18 +224,18 @@ func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (
// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
func IsEmpty(rd io.Reader) (bool, error) {
decompRd, err := DecompressStream(rd)
decompRd, err := compression.DecompressStream(rd)
if err != nil {
return true, fmt.Errorf("failed to decompress archive: %v", err)
return true, fmt.Errorf("failed to decompress archive: %w", err)
}
defer decompRd.Close()
tarReader := tar.NewReader(decompRd)
if _, err := tarReader.Next(); err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return true, nil
}
return false, fmt.Errorf("failed to read next archive header: %v", err)
return false, fmt.Errorf("failed to read next archive header: %w", err)
}
return false, nil
@ -247,7 +250,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp
defer restore()
if decompress {
decompLayer, err := DecompressStream(layer)
decompLayer, err := compression.DecompressStream(layer)
if err != nil {
return 0, err
}
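
A quick sketch of the IsEmpty helper from this file, assuming the github.com/moby/go-archive import path listed in vendor/modules.txt; a well-formed tar stream with zero entries reports true:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"

	archive "github.com/moby/go-archive"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	_ = tw.Close() // a valid tar archive containing no entries

	empty, err := archive.IsEmpty(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("empty:", empty) // true
}
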

View File

@ -0,0 +1,67 @@
package tarheader
import (
"archive/tar"
"os"
)
// assert that we implement [tar.FileInfoNames].
var _ tar.FileInfoNames = (*nosysFileInfo)(nil)
// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
//
// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader]
// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102
type nosysFileInfo struct {
os.FileInfo
}
// Uname stubs out looking up username. It implements [tar.FileInfoNames]
// to prevent [tar.FileInfoHeader] from loading libraries to perform
// username lookups.
func (fi nosysFileInfo) Uname() (string, error) {
return "", nil
}
// Gname stubs out looking up group-name. It implements [tar.FileInfoNames]
// to prevent [tar.FileInfoHeader] from loading libraries to perform
// group-name lookups.
func (fi nosysFileInfo) Gname() (string, error) {
return "", nil
}
func (fi nosysFileInfo) Sys() interface{} {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
return sys
}
return nil
}
// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
//
// Compared to the archive/tar.FileInfoHeader function, this function is safe to
// call from a chrooted process as it does not populate fields which would
// require operating system lookups. It behaves identically to
// tar.FileInfoHeader when fi is a FileInfo value returned from
// tar.Header.FileInfo().
//
// When fi is a FileInfo for a native file, such as returned from os.Stat() and
// os.Lstat(), the returned Header value differs from one returned from
// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
// set as OS lookups would be required to populate them. The AccessTime and
// ChangeTime fields are not currently set (not yet implemented) although that
// is subject to change. Callers which require the AccessTime or ChangeTime
// fields to be zeroed should explicitly zero them out in the returned Header
// value to avoid any compatibility issues in the future.
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
if err != nil {
return nil, err
}
return hdr, sysStat(fi, hdr)
}
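
A minimal sketch of the helper introduced here: build a tar.Header from an os.FileInfo without triggering any OS user/group lookups. Only FileInfoHeaderNoLookups from this file is assumed:

package main

import (
	"fmt"
	"os"

	"github.com/moby/go-archive/tarheader"
)

func main() {
	tmp, err := os.CreateTemp("", "tarheader-demo-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	tmp.Close()

	fi, err := os.Lstat(tmp.Name())
	if err != nil {
		panic(err)
	}

	hdr, err := tarheader.FileInfoHeaderNoLookups(fi, "")
	if err != nil {
		panic(err)
	}
	// Uname/Gname stay empty by design; numeric IDs come from sysStat.
	fmt.Println(hdr.Name, hdr.Mode, hdr.Uid, hdr.Gid, hdr.Uname, hdr.Gname)
}
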

View File

@ -0,0 +1,46 @@
//go:build !windows
package tarheader
import (
"archive/tar"
"os"
"runtime"
"syscall"
"golang.org/x/sys/unix"
)
// sysStat populates hdr from system-dependent fields of fi without performing
// any OS lookups.
func sysStat(fi os.FileInfo, hdr *tar.Header) error {
// Devmajor and Devminor are only needed for special devices.
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
// ZFS in particular does not override the default:
// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
// Such large values cannot be encoded in a tar header.
if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
return nil
}
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
return nil
}

View File

@ -0,0 +1,12 @@
package tarheader
import (
"archive/tar"
"os"
)
// sysStat populates hdr from system-dependent fields of fi without performing
// any OS lookups. It is a no-op on Windows.
func sysStat(os.FileInfo, *tar.Header) error {
return nil
}

9
vendor/modules.txt vendored
View File

@ -428,7 +428,7 @@ github.com/distribution/reference
## explicit
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
# github.com/docker/docker v28.0.4+incompatible
# github.com/docker/docker v28.1.0+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@ -715,9 +715,16 @@ github.com/moby/buildkit/util/stack
# github.com/moby/docker-image-spec v1.3.1
## explicit; go 1.18
github.com/moby/docker-image-spec/specs-go/v1
# github.com/moby/go-archive v0.1.0
## explicit; go 1.23.0
github.com/moby/go-archive
github.com/moby/go-archive/compression
github.com/moby/go-archive/tarheader
# github.com/moby/patternmatcher v0.6.0
## explicit; go 1.19
github.com/moby/patternmatcher
# github.com/moby/sys/atomicwriter v0.1.0
## explicit; go 1.18
# github.com/moby/sys/capability v0.4.0
## explicit; go 1.21
github.com/moby/sys/capability