From f0cfbbe2cc1da6b3a45e4249134f6d2d6849cb27 Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Wed, 29 Mar 2023 14:00:05 +0200 Subject: [PATCH 1/2] vendor containers/common@e27c30ee9b1b Signed-off-by: Valentin Rothberg --- go.mod | 12 +- go.sum | 23 +- .../containers/common/libimage/disk_usage.go | 4 +- .../containers/common/libimage/filters.go | 2 +- .../containers/common/libimage/history.go | 2 +- .../containers/common/libimage/image.go | 19 +- .../containers/common/libimage/image_tree.go | 2 +- .../containers/common/libimage/layer_tree.go | 10 +- .../containers/common/libimage/runtime.go | 38 ++- .../common/pkg/netns/netns_linux.go | 38 +-- .../containers/image/v5/copy/blob.go | 22 +- .../containers/image/v5/copy/single.go | 48 ++-- .../image/v5/directory/directory_dest.go | 32 ++- .../image/v5/docker/docker_image_dest.go | 50 ++-- .../image/v5/docker/internal/tarfile/dest.go | 28 +- .../v5/docker/internal/tarfile/writer.go | 11 +- .../internal/imagedestination/impl/compat.go | 27 +- .../stubs/put_blob_partial.go | 4 +- .../v5/internal/imagedestination/wrapper.go | 30 ++- .../image/v5/internal/private/private.go | 31 ++- .../image/v5/oci/archive/oci_dest.go | 12 +- .../image/v5/oci/layout/oci_dest.go | 38 ++- .../image/v5/openshift/openshift_dest.go | 12 +- .../containers/image/v5/ostree/ostree_dest.go | 28 +- .../containers/image/v5/pkg/blobcache/dest.go | 20 +- .../image/v5/storage/storage_dest.go | 255 +++++++++--------- .../drivers/quota/projectquota_unsupported.go | 4 + .../go-openapi/strfmt/.golangci.yml | 59 ++-- vendor/github.com/go-openapi/strfmt/date.go | 6 +- vendor/github.com/go-openapi/strfmt/format.go | 2 +- vendor/github.com/go-openapi/strfmt/time.go | 21 +- vendor/github.com/go-openapi/strfmt/ulid.go | 11 +- .../github.com/imdario/mergo/CONTRIBUTING.md | 112 ++++++++ vendor/github.com/imdario/mergo/README.md | 5 +- vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/map.go | 6 +- vendor/github.com/imdario/mergo/merge.go | 59 ++-- vendor/github.com/imdario/mergo/mergo.go | 11 +- .../sylabs/sif/v2/pkg/sif/create.go | 42 +-- .../github.com/sylabs/sif/v2/pkg/sif/sif.go | 8 +- vendor/modules.txt | 12 +- 41 files changed, 760 insertions(+), 410 deletions(-) create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md diff --git a/go.mod b/go.mod index 239de6d7d9..b28c16f7f6 100644 --- a/go.mod +++ b/go.mod @@ -12,13 +12,13 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.2.0 github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7 - github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c + github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b github.com/containers/conmon v2.0.20+incompatible - github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a + github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8 github.com/containers/libhvee v0.0.2 github.com/containers/ocicrypt v1.1.7 github.com/containers/psgo v1.8.0 - github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927 + github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b github.com/coreos/go-systemd/v22 v22.5.0 github.com/coreos/stream-metadata-go v0.4.1 github.com/cyphar/filepath-securejoin v0.2.3 @@ -103,7 +103,7 @@ require ( github.com/go-openapi/loads v0.21.2 // indirect github.com/go-openapi/runtime v0.24.1 // indirect github.com/go-openapi/spec v0.20.7 // indirect - 
github.com/go-openapi/strfmt v0.21.3 // indirect + github.com/go-openapi/strfmt v0.21.5 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.0 // indirect github.com/go-playground/locales v0.14.0 // indirect @@ -119,7 +119,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -155,7 +155,7 @@ require ( github.com/sigstore/sigstore v1.6.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect - github.com/sylabs/sif/v2 v2.11.0 // indirect + github.com/sylabs/sif/v2 v2.11.1 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect diff --git a/go.sum b/go.sum index e9b1941be9..4188a6bcb8 100644 --- a/go.sum +++ b/go.sum @@ -247,12 +247,12 @@ github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7 h1:GmQhTfsGuYgGfuYWEF4Ed+rEvlSWRmxisLBL2J8rCb4= github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7/go.mod h1:sFvOi+WMtMtrkxx1Dn8EhF5/ddXNyC1f5LAj4ZGzjAs= -github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c h1:j/52772OnuMHg3B2sgMM038S6C/uAJ8cXj9l4jNOjvo= -github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c/go.mod h1:RyY5B1E+PsFnZOW28xgFkjce0oCAMN7c/zskaCYmAkQ= +github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b h1:1QbWSASZc7C9Oi9lO+cQdBlwQ11ofmPAgqu3h3HKyWA= +github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b/go.mod h1:YyHQ+bAH0sv2K6q49qiuyrA8W8RMiBhUq3MtYs8fbfk= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a h1:2xIif78r5x2nmdb5uhjXBZuexiDAt1c/XIXFxFhfKSk= -github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a/go.mod h1:9PM/hiCVTh6dt8Swi7eYKXKHIaPabHn8gtFV2YD44Mk= +github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8 h1:hL/KrmP4ZMRmokrz+YNBem1ECKtytLU4/kUn3mSSkz0= +github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8/go.mod h1:pquu2CUlF4i+OBB5MM6kb36ZkYGQze8Wqv91aYIe9eo= github.com/containers/libhvee v0.0.2 h1:eWtbOvpT8bD9jvksMES2yXUmEpcE0zENWkci+bbP7U8= github.com/containers/libhvee v0.0.2/go.mod h1:bV1MfbuXk/ZLWHiWZpm8aePOR6iJGD1q55guYhH4CnA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -266,8 +266,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= github.com/containers/storage v1.43.0/go.mod 
h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s= -github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927 h1:VGSwgqH/hBZqlWR48MFNrpT4meMzj+fVg6SYM2uSWWA= -github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927/go.mod h1:tNwkJMFiChoEURP+ofq34pGRysOoFk/QCVrdmS1EzPI= +github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b h1:ip+OrrC/fT7iqIuNHXmfxq7Xc5myNesE/e99qu8p030= +github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b/go.mod h1:oZ5Sscs6ALLhT3j2qTZAtD5Mi8B3G6+PFa54raz7pF4= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -432,8 +432,9 @@ github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.5 h1:Z/algjpXIZpbvdN+6KbVTkpO75RuedMrqpn1GN529h4= +github.com/go-openapi/strfmt v0.21.5/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -628,8 +629,8 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -973,8 +974,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/sylabs/sif/v2 v2.11.0 h1:s1oEFZCb1TX22zT3Twb4tY0X8CVfpo9IEZfhgZzCP+4= -github.com/sylabs/sif/v2 v2.11.0/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ= +github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY= +github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ= 
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go index 431642f5de..f27edc03bd 100644 --- a/vendor/github.com/containers/common/libimage/disk_usage.go +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -29,12 +29,12 @@ type ImageDiskUsage struct { // storage. Note that a single image may yield multiple usage reports, one for // each repository tag. func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error) { - layerTree, err := r.layerTree() + images, err := r.ListImages(ctx, nil, nil) if err != nil { return nil, -1, err } - images, err := r.ListImages(ctx, nil, nil) + layerTree, err := r.layerTree(images) if err != nil { return nil, -1, err } diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 8f85640e6c..441011edd2 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -81,7 +81,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp var tree *layerTree getTree := func() (*layerTree, error) { if tree == nil { - t, err := r.layerTree() + t, err := r.layerTree(nil) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index b63fe696bc..46252df106 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -24,7 +24,7 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { return nil, err } - layerTree, err := i.runtime.layerTree() + layerTree, err := i.runtime.layerTree(nil) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 032dd13999..9090f035a2 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -23,6 +23,17 @@ import ( // Image represents an image in the containers storage and allows for further // operations and data manipulation. type Image struct { + // ListData that is being set by (*Runtime).ListImages(). Note that + // the data may be outdated. + ListData struct { + // Dangling indicates if the image is dangling. Use + // `IsDangling()` to compute the latest state. + IsDangling *bool + // Parent points to the parent image. Use `Parent()` to + // compute the latest state. + Parent *Image + } + // Backwards pointer to the runtime. 
 	runtime *Runtime
@@ -216,10 +227,14 @@ func (i *Image) TopLayer() string {
 // Parent returns the parent image or nil if there is none
 func (i *Image) Parent(ctx context.Context) (*Image, error) {
-	tree, err := i.runtime.layerTree()
+	tree, err := i.runtime.layerTree(nil)
 	if err != nil {
 		return nil, err
 	}
+	return i.parent(ctx, tree)
+}
+
+func (i *Image) parent(ctx context.Context, tree *layerTree) (*Image, error) {
 	return tree.parent(ctx, i)
 }
 
@@ -246,7 +261,7 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) {
 // created for this invocation only.
 func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) {
 	if tree == nil {
-		t, err := i.runtime.layerTree()
+		t, err := i.runtime.layerTree(nil)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go
index d48aeeada3..9c958ce6b1 100644
--- a/vendor/github.com/containers/common/libimage/image_tree.go
+++ b/vendor/github.com/containers/common/libimage/image_tree.go
@@ -35,7 +35,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) {
 		fmt.Fprintf(sb, "No Image Layers")
 	}
 
-	layerTree, err := i.runtime.layerTree()
+	layerTree, err := i.runtime.layerTree(nil)
 	if err != nil {
 		return "", err
 	}
diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go
index 8c84dc41f4..a7d2f8c588 100644
--- a/vendor/github.com/containers/common/libimage/layer_tree.go
+++ b/vendor/github.com/containers/common/libimage/layer_tree.go
@@ -75,15 +75,17 @@ func (l *layerNode) repoTags() ([]string, error) {
 
 // layerTree extracts a layerTree from the layers in the local storage and
 // relates them to the specified images.
-func (r *Runtime) layerTree() (*layerTree, error) {
+func (r *Runtime) layerTree(images []*Image) (*layerTree, error) {
 	layers, err := r.store.Layers()
 	if err != nil {
 		return nil, err
 	}
 
-	images, err := r.ListImages(context.Background(), nil, nil)
-	if err != nil {
-		return nil, err
+	if images == nil {
+		images, err = r.ListImages(context.Background(), nil, nil)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	tree := layerTree{
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 7cbf9c95eb..95da83bb99 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -537,6 +537,8 @@ type ListImagesOptions struct {
 	// used). The definition of an external container can be set by
 	// callers.
 	IsExternalContainerFunc IsExternalContainerFunc
+	// SetListData will populate the Image.ListData fields of returned images.
+	SetListData bool
 }
 
 // ListImages lists images in the local container storage. If names are
@@ -565,7 +567,41 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI
 		}
 	}
 
-	return r.filterImages(ctx, images, options)
+	filtered, err := r.filterImages(ctx, images, options)
+	if err != nil {
+		return nil, err
+	}
+
+	if !options.SetListData {
+		return filtered, nil
+	}
+
+	// If explicitly requested by the user, pre-compute and cache the
+	// dangling and parent information of all filtered images. That will
+	// considerably speed things up for callers who need this information,
+	// as the layer tree will be computed once for all images instead of
+	// once for each individual image (see containers/podman/issues/17828).
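// [Editor's sketch — not part of the upstream patch] Caller side of the
// option implemented below; ListImagesOptions, SetListData, ListData, ID(),
// and IsDangling() come from the code above, the helper is illustrative.
package example

import (
	"context"

	"github.com/containers/common/libimage"
)

func listDanglingIDs(ctx context.Context, rt *libimage.Runtime) ([]string, error) {
	images, err := rt.ListImages(ctx, nil, &libimage.ListImagesOptions{SetListData: true})
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, img := range images {
		// Pre-computed once for all images via the shared layer tree,
		// instead of once per image as img.IsDangling(ctx) would do.
		if img.ListData.IsDangling != nil && *img.ListData.IsDangling {
			ids = append(ids, img.ID())
		}
	}
	return ids, nil
}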
+ + tree, err := r.layerTree(images) + if err != nil { + return nil, err + } + + for i := range filtered { + isDangling, err := filtered[i].isDangling(ctx, tree) + if err != nil { + return nil, err + } + filtered[i].ListData.IsDangling = &isDangling + + parent, err := filtered[i].parent(ctx, tree) + if err != nil { + return nil, err + } + filtered[i].ListData.Parent = parent + } + + return filtered, nil } // RemoveImagesOptions allow for customizing image removal. diff --git a/vendor/github.com/containers/common/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go index 883fdd9567..f2569d3797 100644 --- a/vendor/github.com/containers/common/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go @@ -20,6 +20,7 @@ package netns import ( "crypto/rand" + "errors" "fmt" "os" "path" @@ -51,13 +52,24 @@ func GetNSRunDir() (string, error) { // NewNS creates a new persistent (bind-mounted) network namespace and returns // an object representing that namespace, without switching to it. func NewNS() (ns.NetNS, error) { - b := make([]byte, 16) - _, err := rand.Reader.Read(b) - if err != nil { - return nil, fmt.Errorf("failed to generate random netns name: %v", err) + for i := 0; i < 10000; i++ { + b := make([]byte, 16) + _, err := rand.Reader.Read(b) + if err != nil { + return nil, fmt.Errorf("failed to generate random netns name: %v", err) + } + nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + ns, err := NewNSWithName(nsName) + if err == nil { + return ns, nil + } + // retry when the name already exists + if errors.Is(err, os.ErrExist) { + continue + } + return nil, err } - nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) - return NewNSWithName(nsName) + return nil, errors.New("failed to find free netns path name") } // NewNSWithName creates a new persistent (bind-mounted) network namespace and returns @@ -101,7 +113,7 @@ func NewNSWithName(name string) (ns.NetNS, error) { // create an empty file at the mount point nsPath := path.Join(nsRunDir, name) - mountPointFd, err := os.Create(nsPath) + mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if err != nil { return nil, err } @@ -149,18 +161,6 @@ func NewNSWithName(name string) (ns.NetNS, error) { return } - // Put this thread back to the orig ns, since it might get reused (pre go1.10) - defer func() { - if err := origNS.Set(); err != nil { - if unshare.IsRootless() && strings.Contains(err.Error(), "operation not permitted") { - // When running in rootless mode it will fail to re-join - // the network namespace owned by root on the host. - return - } - logrus.Warnf("Unable to reset namespace: %q", err) - } - }() - // bind mount the netns from the current thread (from /proc) onto the // mount point. This causes the namespace to persist, even when there // are no threads in the ns. 
Make this a shared mount; it needs to be diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go index cfac3e6d58..96674ddbb8 100644 --- a/vendor/github.com/containers/image/v5/copy/blob.go +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -104,12 +104,11 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read if !isConfig { options.LayerIndex = &layerIndex } - uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) if err != nil { return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) } - - uploadedInfo.Annotations = stream.info.Annotations + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) @@ -169,3 +168,20 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { } return n, err } + +// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo. +func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo { + // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + return types.BlobInfo{ + Digest: uploadedBlob.Digest, + Size: uploadedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto. + CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream. + CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream. + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream. + } +} diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index c7b52ac9e3..3d8491ede0 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -621,7 +621,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. 
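// [Editor's sketch — not part of the upstream patch] What the reuse call
// below receives from a transport in the two interesting cases; this would
// have to live inside containers/image (internal/private is not importable
// from outside), and all digest/size values are illustrative.
package copy

import (
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// exampleReusedBlobs: an exact hit reports only digest and size, and
// updatedBlobInfoFromReuse (below) then inherits compression info from the
// source blob; a substituted, differently-compressed hit must also report
// how the compression changed.
func exampleReusedBlobs(exactDigest, gzipVariant digest.Digest) (exact, substituted private.ReusedBlob) {
	exact = private.ReusedBlob{Digest: exactDigest, Size: 1234}
	substituted = private.ReusedBlob{
		Digest:               gzipVariant,
		Size:                 987,
		CompressionOperation: types.Compress,
		CompressionAlgorithm: &compression.Gzip,
	}
	return exact, substituted
}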
- reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ Cache: ic.c.blobInfoCache, CanSubstitute: canSubstitute, EmptyLayer: emptyLayer, @@ -634,7 +634,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists") defer bar.Abort(false) bar.mark100PercentComplete() }() @@ -647,19 +647,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } } - // If the reused blob has the same digest as the one we asked for, but - // the transport didn't/couldn't supply compression info, fill it in based - // on what we know from the srcInfos we were given. - // If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will - // call UpdateLayerInfos(), which uses this information to compute the - // MediaType value for the updated layer infos, and it the transport - // didn't pass the information along from its input to its output, then - // it can derive the MediaType incorrectly. - if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil { - blobInfo.CompressionOperation = srcInfo.CompressionOperation - blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm - } - return blobInfo, cachedDiffID, nil + return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil } } @@ -679,7 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { if srcInfo.Size != -1 { bar.SetRefill(srcInfo.Size - bar.Current()) @@ -687,7 +675,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) - return true, info + return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob) } logrus.Debugf("Failed to retrieve partial blob: %v", err) return false, types.BlobInfo{} @@ -742,6 +730,32 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to }() } +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. 
+ Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. + CompressionOperation: reusedBlob.CompressionOperation, + CompressionAlgorithm: reusedBlob.CompressionAlgorithm, + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway. + } + // The transport is only expected to fill CompressionOperation and CompressionAlgorithm + // if the blob was substituted; otherwise, fill it in based + // on what we know from the srcInfos we were given. + if reusedBlob.Digest == inputInfo.Digest { + res.CompressionOperation = inputInfo.CompressionOperation + res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + } + return res +} + // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps (de/re/)compressing the stream, diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 55b29fe17a..974d23d5fa 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -132,11 +132,11 @@ func (d *dirImageDestination) Close() error { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } succeeded := false explicitClosed := false @@ -153,14 +153,14 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, stream) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. @@ -169,7 +169,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // always fails on Windows. 
if runtime.GOOS != "windows" { if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } } @@ -178,32 +178,30 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. blobFile.Close() explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } succeeded = true - return types.BlobInfo{Digest: blobDigest, Size: size}, nil + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest") } blobPath := d.ref.layerPath(info.Digest) finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 78c81a3df2..44e2aea23d 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -132,8 +132,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". @@ -141,7 +141,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer cleanup() stream = streamCopy @@ -152,10 +152,10 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } if haveBlob { - return reusedInfo, nil + return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil } } @@ -164,16 +164,16 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream logrus.Debugf("Uploading %s", uploadPath) res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer res.Body.Close() if res.StatusCode != http.StatusAccepted { logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) + return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) } uploadLocation, err := res.Location() if err != nil { - return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err) + return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err) } digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) @@ -201,7 +201,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream return uploadLocation, nil }() if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobDigest := digester.Digest() @@ -212,17 +212,17 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream uploadLocation.RawQuery = locationQuery.Encode() res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { logrus.Debugf("Error uploading layer, response %#v", *res) - return 
types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) + return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) } logrus.Debugf("Upload of layer %s complete", blobDigest) options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) - return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil + return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil } // blobExists returns true iff repo contains a blob with digest, and if so, also its size. @@ -299,34 +299,32 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified // blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read. // The caller must ensure info.Digest is set. -func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) { exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } // First, check whether the blob happens to already exist at the destination. 
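// [Editor's sketch — not part of the upstream patch] The two-step upload that
// PutBlobWithOptions above drives through makeRequest*, reduced to plain
// net/http: a POST to the uploads endpoint yields a Location, then a PUT with
// the expected digest as a query parameter finalizes the blob (201 Created).
// No auth, retries, or chunking; the helper and its parameters are
// illustrative.
package example

import (
	"bytes"
	"context"
	"fmt"
	"net/http"

	digest "github.com/opencontainers/go-digest"
)

func pushBlob(ctx context.Context, registry, repo string, blob []byte) error {
	dgst := digest.FromBytes(blob)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, repo), nil)
	if err != nil {
		return err
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusAccepted {
		return fmt.Errorf("initiating upload: status %d", res.StatusCode)
	}
	loc, err := res.Location() // upload URL assigned by the registry
	if err != nil {
		return err
	}
	q := loc.Query()
	q.Set("digest", dgst.String())
	loc.RawQuery = q.Encode()
	put, err := http.NewRequestWithContext(ctx, http.MethodPut, loc.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	put.Header.Set("Content-Type", "application/octet-stream")
	res2, err := http.DefaultClient.Do(put)
	if err != nil {
		return err
	}
	defer res2.Body.Close()
	if res2.StatusCode != http.StatusCreated {
		return fmt.Errorf("uploading layer: status %d", res2.StatusCode)
	}
	return nil
}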
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } if haveBlob { return true, reusedInfo, nil @@ -396,10 +394,14 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, continue } - return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil + return true, private.ReusedBlob{ + Digest: candidate.Digest, + Size: size, + CompressionOperation: compressionOperation, + CompressionAlgorithm: compressionAlgorithm}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 9a0ea683e6..00e25748bd 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on uncompressed data. 
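// [Editor's sketch — not part of the upstream patch] The spooling idiom that
// streamdigest.ComputeBlobInfo (used just below) is built on, reduced to its
// core; the helper name is illustrative.
package example

import (
	"io"
	"os"

	digest "github.com/opencontainers/go-digest"
)

// spoolBlob copies stream into a temporary file while hashing it, so both
// size and digest are known before anything is committed to the archive;
// callers must remove the returned file when done.
func spoolBlob(stream io.Reader) (path string, d digest.Digest, size int64, err error) {
	f, err := os.CreateTemp("", "blob-spool")
	if err != nil {
		return "", "", -1, err
	}
	defer f.Close()
	digester := digest.Canonical.Digester()
	// TeeReader feeds every byte read from stream into the digester.
	size, err = io.Copy(f, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		os.Remove(f.Name())
		return "", "", -1, err
	}
	return f.Name(), digester.Digest(), size, nil
}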
if inputInfo.Size == -1 || inputInfo.Digest == "" { logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer cleanup() stream = streamCopy @@ -92,47 +92,45 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, } if err := d.archive.lock(); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer d.archive.unlock() // Maybe the blob has been already sent ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } if ok { - return reusedInfo, nil + return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil } if options.IsConfig { buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) if err != nil { - return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err) + return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err) } d.config = buf if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { - return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err) + return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err) } } else { if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } } d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}) - return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil + return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
-func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if err := d.archive.lock(); err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } defer d.archive.unlock() diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index b5721fef88..df7b2c0906 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -13,6 +13,7 @@ import ( "time" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" @@ -69,17 +70,17 @@ func (w *Writer) unlock() { // tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata. // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // The caller must have locked the Writer. -func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) { +func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } if blob, ok := w.blobs[info.Digest]; ok { - return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // recordBlob records metadata of a recorded blob, which must contain at least a digest and size. diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go index cff68ac16e..47c169a1f8 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go @@ -43,10 +43,17 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{ + res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{ Cache: blobinfocache.FromBlobInfoCache(cache), IsConfig: isConfig, }) + if err != nil { + return types.BlobInfo{}, err + } + return types.BlobInfo{ + Digest: res.Digest, + Size: res.Size, + }, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -59,10 +66,26 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ + reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ Cache: blobinfocache.FromBlobInfoCache(cache), CanSubstitute: canSubstitute, }) + if !reused || err != nil { + return reused, types.BlobInfo{}, err + } + res := types.BlobInfo{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + } + // This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers. + // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution; + // provide the value in cases where it is likely to be correct. + if blob.Digest == info.Digest { + res.MediaType = info.MediaType + } + return true, res, nil } // PutSignatures writes a set of signatures to the destination. diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index 225ea4491f..0dc6bd5af7 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { - return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } // ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true. 
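[Editor's note, not part of the upstream patch] The Compat shim above converts each private-API result back to a public types.BlobInfo in exactly one place, so individual transports no longer duplicate that logic. Below is a toy reduction of the embedding pattern this enables; every name in it is hypothetical, while in the real code a transport typically embeds impl.Compat and initializes it via impl.AddCompat.

package main

import "fmt"

// newAPI stands in for private.ImageDestinationInternalOnly: the richer,
// internal method set a transport actually implements.
type newAPI interface {
	putV2(data string) (string, error)
}

// compat stands in for impl.Compat: it implements the old method once, on
// top of the new one, for every transport that embeds it.
type compat struct{ dest newAPI }

func (c compat) PutV1(data string) (string, error) { return c.dest.putV2(data) }

// myDest implements only the new API and gains the old one by embedding.
type myDest struct{ compat }

func (d *myDest) putV2(data string) (string, error) { return "stored:" + data, nil }

func main() {
	d := &myDest{}
	d.compat = compat{dest: d} // mirrors `d.Compat = impl.AddCompat(d)`
	out, _ := d.PutV1("layer")
	fmt.Println(out) // stored:layer
}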
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 43575ede33..41a81628bd 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -46,20 +46,34 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { - return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) + if err != nil { + return private.UploadedBlob{}, err + } + return private.UploadedBlob{ + Digest: res.Digest, + Size: res.Size, + }, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + }, nil } // PutSignaturesWithFormat writes a set of signatures to the destination. 
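[Editor's note, not part of the upstream patch] For the consumer side of this wrapper: generic code asks FromPublic for the private interface and then no longer cares whether a transport implements it natively. A minimal sketch (the helper is hypothetical, and the internal/... packages are importable only from within containers/image itself):

package example

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/internal/imagedestination"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// putViaPrivateAPI uploads one blob through the private API: FromPublic
// returns dest unchanged if it already implements private.ImageDestination,
// and wraps its public PutBlob as shown above otherwise.
func putViaPrivateAPI(ctx context.Context, dest types.ImageDestination, stream io.Reader, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
	priv := imagedestination.FromPublic(dest)
	return priv.PutBlobWithOptions(ctx, stream, info, private.PutBlobOptions{Cache: cache})
}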
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
index bfd6148cec..b1dd4ceb0d 100644
--- a/vendor/github.com/containers/image/v5/internal/private/private.go
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -7,6 +7,7 @@ import (
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/internal/blobinfocache"
     "github.com/containers/image/v5/internal/signature"
+    compression "github.com/containers/image/v5/pkg/compression/types"
     "github.com/containers/image/v5/types"
     "github.com/opencontainers/go-digest"
 )
@@ -46,24 +47,22 @@ type ImageDestinationInternalOnly interface {
     // inputInfo.MediaType describes the blob format, if known.
     // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
     // to any other readers for download using the supplied digest.
-    // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-    PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+    // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+    PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)

     // PutBlobPartial attempts to create a blob using the data that is already present
     // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
     // It is available only if SupportsPutBlobPartial().
     // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
     // should fall back to PutBlobWithOptions.
-    PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
+    PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)

     // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
     // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
     // info.Digest must not be empty.
-    // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-    // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-    // reflected in the manifest that will be written.
+    // If the blob has been successfully reused, returns (true, info, nil).
     // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-    TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+    TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)

     // PutSignaturesWithFormat writes a set of signatures to the destination.
     // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
@@ -79,6 +78,13 @@ type ImageDestination interface {
     ImageDestinationInternalOnly
 }

+// UploadedBlob is information about a blob written to a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
+type UploadedBlob struct {
+    Digest digest.Digest
+    Size   int64
+}
+
 // PutBlobOptions are used in PutBlobWithOptions.
 type PutBlobOptions struct {
     Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
@@ -112,6 +118,17 @@ type TryReusingBlobOptions struct {
     SrcRef reference.Named // A reference to the source image that contains the input blob.
 }

+// ReusedBlob is information about a blob reused in a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting.
+type ReusedBlob struct {
+    Digest digest.Digest // Must be provided
+    Size   int64         // Must be provided
+    // The following compression fields should be set when the reuse substitutes
+    // a differently-compressed blob.
+    CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
+    CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+}
+
 // ImageSourceChunk is a portion of a blob.
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceChunk struct {
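The ReusedBlob compression fields above matter when a destination substitutes a differently-compressed copy of the requested layer. A hedged, self-contained sketch of what such a report could look like: the struct below mirrors private.ReusedBlob locally (the real one lives in an internal package), while types, pkg/compression, and go-digest are the real public packages; the digest value is a placeholder.

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/pkg/compression"
        compressiontypes "github.com/containers/image/v5/pkg/compression/types"
        "github.com/containers/image/v5/types"
        "github.com/opencontainers/go-digest"
    )

    // reusedBlob mirrors private.ReusedBlob from the patch above.
    type reusedBlob struct {
        Digest               digest.Digest
        Size                 int64
        CompressionOperation types.LayerCompression
        CompressionAlgorithm *compressiontypes.Algorithm
    }

    func main() {
        // Hypothetical: the destination found a gzip-compressed copy of the layer
        // it was asked to reuse, so it reports the substituted blob plus the
        // compression change to be reflected in the manifest that will be written.
        blob := reusedBlob{
            Digest:               digest.Digest("sha256:beef..."), // placeholder digest
            Size:                 1024,
            CompressionOperation: types.Compress,
            CompressionAlgorithm: &compression.Gzip,
        }
        fmt.Printf("%+v\n", blob)
    }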
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index f710be10b6..8386c47a3f 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
 }

@@ -119,18 +119,16 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
     return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
 }

diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 4e4433f12b..0a9e4eab91 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     succeeded := false
     explicitClosed := false
@@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
     // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
     size, err := io.Copy(blobFile, stream)
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     blobDigest := digester.Digest()
     if inputInfo.Size != -1 && size != inputInfo.Size {
-        return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+        return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
     }
     if err := blobFile.Sync(); err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }

     // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -144,52 +144,50 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
     // always fails on Windows.
     if runtime.GOOS != "windows" {
         if err := blobFile.Chmod(0644); err != nil {
-            return types.BlobInfo{}, err
+            return private.UploadedBlob{}, err
         }
     }

     blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     if err := ensureParentDirectoryExists(blobPath); err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }

     // need to explicitly close the file, since a rename won't otherwise work on Windows
     blobFile.Close()
     explicitClosed = true
     if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     succeeded = true
-    return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+    return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }
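The PutBlobWithOptions implementation above follows the usual atomic-write pattern: stream into a temporary file while digesting, fsync, then rename into the digest-addressed path so that a partially written blob is never visible under its final name. A simplified, self-contained sketch of that pattern; writeBlobAtomically and its layout are illustrative, not part of this patch, and the mode-0644 and Windows details from the real code are omitted.

    package main

    import (
        "fmt"
        "io"
        "os"
        "path/filepath"
        "strings"

        "github.com/opencontainers/go-digest"
    )

    // writeBlobAtomically streams into a temporary file while digesting, syncs,
    // and renames the file into a digest-addressed location.
    func writeBlobAtomically(dir string, stream io.Reader) (digest.Digest, int64, error) {
        tmp, err := os.CreateTemp(dir, "put-blob")
        if err != nil {
            return "", -1, err
        }
        defer os.Remove(tmp.Name()) // a no-op once the rename has succeeded

        digester := digest.Canonical.Digester()
        size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
        if err != nil {
            tmp.Close()
            return "", -1, err
        }
        if err := tmp.Sync(); err != nil {
            tmp.Close()
            return "", -1, err
        }
        tmp.Close() // must close before rename on Windows
        d := digester.Digest()
        if err := os.Rename(tmp.Name(), filepath.Join(dir, d.Encoded())); err != nil {
            return "", -1, err
        }
        return d, size, nil
    }

    func main() {
        dir, _ := os.MkdirTemp("", "blobs")
        d, n, err := writeBlobAtomically(dir, strings.NewReader("layer data"))
        fmt.Println(d, n, err)
    }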
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     if info.Digest == "" {
-        return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
+        return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
     }
     blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
     if err != nil {
-        return false, types.BlobInfo{}, err
+        return false, private.ReusedBlob{}, err
     }
     finfo, err := os.Stat(blobPath)
     if err != nil && os.IsNotExist(err) {
-        return false, types.BlobInfo{}, nil
+        return false, private.ReusedBlob{}, nil
     }
     if err != nil {
-        return false, types.BlobInfo{}, err
+        return false, private.ReusedBlob{}, err
     }

-    return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+    return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
 }

 // PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
index 92aec0266c..7b1b5dfcde 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
@@ -116,8 +116,8 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
 }

@@ -126,18 +126,16 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
     return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     return d.docker.TryReusingBlobWithOptions(ctx, info, options)
 }

diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index abfb59003d..48f3ee5a72 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -135,16 +135,16 @@ func (d *ostreeImageDestination) Close() error {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }

     blobPath := filepath.Join(tmpDir, "content")
     blobFile, err := os.Create(blobPath)
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     defer blobFile.Close()

@@ -152,19 +152,19 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
     // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
     size, err := io.Copy(blobFile, stream)
     if err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }
     blobDigest := digester.Digest()
     if inputInfo.Size != -1 && size != inputInfo.Size {
-        return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+        return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
     }
     if err := blobFile.Sync(); err != nil {
-        return types.BlobInfo{}, err
+        return private.UploadedBlob{}, err
     }

     hash := blobDigest.Hex()
     d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
-    return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+    return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }

 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
@@ -334,11 +334,11 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
 // reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     if d.repo == nil {
         repo, err := openRepo(d.ref.repo)
         if err != nil {
-            return false, types.BlobInfo{}, err
+            return false, private.ReusedBlob{}, err
         }
         d.repo = repo
     }

@@ -346,25 +346,25 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
     if err != nil || !found {
-        return found, types.BlobInfo{}, err
+        return found, private.ReusedBlob{}, err
     }

     found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
     if err != nil || !found {
-        return found, types.BlobInfo{}, err
+        return found, private.ReusedBlob{}, err
     }

     found, data, err = readMetadata(d.repo, branch, "docker.size")
     if err != nil || !found {
-        return found, types.BlobInfo{}, err
+        return found, private.ReusedBlob{}, err
     }

     size, err := strconv.ParseInt(data, 10, 64)
     if err != nil {
-        return false, types.BlobInfo{}, err
+        return false, private.ReusedBlob{}, err
     }

-    return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+    return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
 }

 // PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go
index c69eea6e37..a0e353d46f 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go
@@ -134,8 +134,8 @@ func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     var tempfile *os.File
     var err error
     var n int
@@ -227,18 +227,16 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
     return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)
     if err != nil || present {
         return present, reusedInfo, err
@@ -246,7 +244,7 @@ func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, in

     blobPath, _, isConfig, err := d.reference.findBlob(info)
     if err != nil {
-        return false, types.BlobInfo{}, err
+        return false, private.ReusedBlob{}, err
     }
     if blobPath != "" {
         f, err := os.Open(blobPath)
@@ -259,13 +257,13 @@ func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, in
                 LayerIndex: options.LayerIndex,
             })
             if err != nil {
-                return false, types.BlobInfo{}, err
+                return false, private.ReusedBlob{}, err
             }
-            return true, uploadedInfo, nil
+            return true, private.ReusedBlob{Digest: uploadedInfo.Digest, Size: uploadedInfo.Size}, nil
         }
     }

-    return false, types.BlobInfo{}, nil
+    return false, private.ReusedBlob{}, nil
 }

 func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
index d84d494933..576d510cc5 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_dest.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -77,13 +77,19 @@ type storageImageDestination struct {
     indexToStorageID map[int]*string
     // All accesses to below data are protected by `lock` which is made
     // *explicit* in the code.
-    blobDiffIDs            map[digest.Digest]digest.Digest                       // Mapping from layer blobsums to their corresponding DiffIDs
-    fileSizes              map[digest.Digest]int64                               // Mapping from layer blobsums to their sizes
-    filenames              map[digest.Digest]string                              // Mapping from layer blobsums to names of files we used to hold them
-    currentIndex           int                                                   // The index of the layer to be committed (i.e., lower indices have already been committed)
-    indexToPulledLayerInfo map[int]*manifest.LayerInfo                           // Mapping from layer (by index) to pulled down blob
-    blobAdditionalLayer    map[digest.Digest]storage.AdditionalLayer             // Mapping from layer blobsums to their corresponding additional layer
-    diffOutputs            map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+    blobDiffIDs           map[digest.Digest]digest.Digest                       // Mapping from layer blobsums to their corresponding DiffIDs
+    fileSizes             map[digest.Digest]int64                               // Mapping from layer blobsums to their sizes
+    filenames             map[digest.Digest]string                              // Mapping from layer blobsums to names of files we used to hold them
+    currentIndex          int                                                   // The index of the layer to be committed (i.e., lower indices have already been committed)
+    indexToAddedLayerInfo map[int]addedLayerInfo                                // Mapping from layer (by index) to blob to add to the image
+    blobAdditionalLayer   map[digest.Digest]storage.AdditionalLayer             // Mapping from layer blobsums to their corresponding additional layer
+    diffOutputs           map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+}
+
+// addedLayerInfo records data about a layer to use in this image.
+type addedLayerInfo struct {
+    digest     digest.Digest
+    emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
 }

 // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
@@ -111,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
             HasThreadSafePutBlob: true,
         }),

-        imageRef:               imageRef,
-        directory:              directory,
-        signatureses:           make(map[digest.Digest][]byte),
-        blobDiffIDs:            make(map[digest.Digest]digest.Digest),
-        blobAdditionalLayer:    make(map[digest.Digest]storage.AdditionalLayer),
-        fileSizes:              make(map[digest.Digest]int64),
-        filenames:              make(map[digest.Digest]string),
-        SignatureSizes:         []int{},
-        SignaturesSizes:        make(map[digest.Digest][]int),
-        indexToStorageID:       make(map[int]*string),
-        indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
-        diffOutputs:            make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+        imageRef:              imageRef,
+        directory:             directory,
+        signatureses:          make(map[digest.Digest][]byte),
+        blobDiffIDs:           make(map[digest.Digest]digest.Digest),
+        blobAdditionalLayer:   make(map[digest.Digest]storage.AdditionalLayer),
+        fileSizes:             make(map[digest.Digest]int64),
+        filenames:             make(map[digest.Digest]string),
+        SignatureSizes:        []int{},
+        SignaturesSizes:       make(map[digest.Digest][]int),
+        indexToStorageID:      make(map[int]*string),
+        indexToAddedLayerInfo: make(map[int]addedLayerInfo),
+        diffOutputs:           make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
     }
     dest.Compat = impl.AddCompat(dest)
     return dest, nil
@@ -158,7 +164,7 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
     info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
     if err != nil {
         return info, err
@@ -168,21 +174,20 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
         return info, nil
     }

-    return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+    return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+        digest:     info.Digest,
+        emptyLayer: options.EmptyLayer,
+    })
 }
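The queueOrCommit call above implements out-of-order pulls: each goroutine registers its layer under the layer's index, and the goroutine that supplies the next expected index becomes the worker that drains the queue in order. A simplified, self-contained sketch of the pattern (names are stand-ins; the real code also consults indexToStorageID and performs the actual commit in commitLayer):

    package main

    import (
        "fmt"
        "sync"
    )

    type addedLayer struct {
        digest string
    }

    type committer struct {
        mu     sync.Mutex
        queued map[int]addedLayer
        next   int // index of the first not-yet-committed layer
    }

    // queueOrCommit records the layer under its index; if it is the next one
    // expected, it commits it and any subsequent layers already queued by
    // other goroutines, unlocking around each commit as the real code does.
    func (c *committer) queueOrCommit(index int, info addedLayer) {
        c.mu.Lock()
        c.queued[index] = info
        if index != c.next {
            c.mu.Unlock() // an earlier layer is still missing; nothing to do yet
            return
        }
        for {
            info, ok := c.queued[c.next]
            if !ok {
                break
            }
            c.mu.Unlock()
            fmt.Printf("committing layer %d (%s)\n", c.next, info.digest) // stand-in for commitLayer
            c.mu.Lock()
            c.next++
        }
        c.mu.Unlock()
    }

    func main() {
        c := &committer{queued: map[int]addedLayer{}}
        c.queueOrCommit(1, addedLayer{digest: "b"}) // queued, waiting for layer 0
        c.queueOrCommit(0, addedLayer{digest: "a"}) // commits 0, then drains 1
    }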
 // putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
 // The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
+func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
     // Stores a layer or data blob in our temporary directory, checking that any information
     // in the blobinfo matches the incoming data.
-    errorBlobInfo := types.BlobInfo{
-        Digest: "",
-        Size:   -1,
-    }
     if blobinfo.Digest != "" {
         if err := blobinfo.Digest.Validate(); err != nil {
-            return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+            return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
         }
     }

@@ -190,7 +195,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
     filename := s.computeNextBlobCacheFile()
     file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
     if err != nil {
-        return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
+        return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
     }
     defer file.Close()
     counter := ioutils.NewWriteCounter(file)
@@ -198,7 +203,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
     digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
     decompressed, err := archive.DecompressStream(stream)
     if err != nil {
-        return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
+        return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
     }

     diffID := digest.Canonical.Digester()
@@ -207,7 +212,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
     _, err = io.Copy(diffID.Hash(), decompressed)
     decompressed.Close()
     if err != nil {
-        return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
+        return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
     }

     // Determine blob properties, and fail if information that we were given about the blob
@@ -217,7 +222,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
     if blobSize < 0 {
         blobSize = counter.Count
     } else if blobinfo.Size != counter.Count {
-        return errorBlobInfo, ErrBlobSizeMismatch
+        return private.UploadedBlob{}, ErrBlobSizeMismatch
     }

     // Record information about the blob.
@@ -229,10 +234,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
     // This is safe because we have just computed diffID, and blobDigest was either computed
     // by us, or validated by the caller (usually copy.digestingReader).
     options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
-    return types.BlobInfo{
-        Digest:    blobDigest,
-        Size:      blobSize,
-        MediaType: blobinfo.MediaType,
+    return private.UploadedBlob{
+        Digest: blobDigest,
+        Size:   blobSize,
     }, nil
 }

@@ -265,7 +269,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
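The fallback contract in that comment — attempt the chunked path first, retry with a full upload on any failure — looks roughly like this on the caller side (a self-contained sketch with stand-in functions, not code from this patch):

    package main

    import (
        "errors"
        "fmt"
    )

    type uploadedBlob struct {
        digest string
        size   int64
    }

    // putBlob tries the partial (chunked) upload first when the destination
    // supports it, and falls back to a full streaming upload if the partial
    // attempt fails for any reason.
    func putBlob(supportsPartial bool, putPartial, putFull func() (uploadedBlob, error)) (uploadedBlob, error) {
        if supportsPartial {
            if blob, err := putPartial(); err == nil {
                return blob, nil
            }
            // The error is not fatal: retry with the full blob contents.
        }
        return putFull()
    }

    func main() {
        full := func() (uploadedBlob, error) { return uploadedBlob{digest: "sha256:…", size: 42}, nil }
        partial := func() (uploadedBlob, error) { return uploadedBlob{}, errors.New("chunk metadata missing") }
        blob, err := putBlob(true, partial, full)
        fmt.Println(blob, err)
    }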
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
     fetcher := zstdFetcher{
         chunkAccessor: chunkAccessor,
         ctx:           ctx,
@@ -274,12 +278,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces

     differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
     if err != nil {
-        return srcInfo, err
+        return private.UploadedBlob{}, err
     }

     out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
     if err != nil {
-        return srcInfo, err
+        return private.UploadedBlob{}, err
     }

     blobDigest := srcInfo.Digest
@@ -291,124 +295,126 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
     s.diffOutputs[blobDigest] = out
     s.lock.Unlock()

-    return srcInfo, nil
+    return private.UploadedBlob{
+        Digest: blobDigest,
+        Size:   srcInfo.Size,
+    }, nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
-    reused, info, err := s.tryReusingBlobAsPending(blobinfo, &options)
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+    reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
     if err != nil || !reused || options.LayerIndex == nil {
         return reused, info, err
     }

-    return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+    return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+        digest:     info.Digest,
+        emptyLayer: options.EmptyLayer,
+    })
 }

-// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
 // The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) tryReusingBlobAsPending(blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
     // lock the entire method as it executes fairly quickly
     s.lock.Lock()
     defer s.lock.Unlock()
     if options.SrcRef != nil {
         // Check if we have the layer in the underlying additional layer store.
-        aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
+        aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
         if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-            return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
+            return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
         } else if err == nil {
             // Record the uncompressed value so that we can use it to calculate layer IDs.
-            s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
-            s.blobAdditionalLayer[blobinfo.Digest] = aLayer
-            return true, types.BlobInfo{
-                Digest:    blobinfo.Digest,
-                Size:      aLayer.CompressedSize(),
-                MediaType: blobinfo.MediaType,
+            s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
+            s.blobAdditionalLayer[digest] = aLayer
+            return true, private.ReusedBlob{
+                Digest: digest,
+                Size:   aLayer.CompressedSize(),
             }, nil
         }
     }

-    if blobinfo.Digest == "" {
-        return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
+    if digest == "" {
+        return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
     }
-    if err := blobinfo.Digest.Validate(); err != nil {
-        return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+    if err := digest.Validate(); err != nil {
+        return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
     }

     // Check if we've already cached it in a file.
-    if size, ok := s.fileSizes[blobinfo.Digest]; ok {
-        return true, types.BlobInfo{
-            Digest:    blobinfo.Digest,
-            Size:      size,
-            MediaType: blobinfo.MediaType,
+    if size, ok := s.fileSizes[digest]; ok {
+        return true, private.ReusedBlob{
+            Digest: digest,
+            Size:   size,
         }, nil
     }

     // Check if we have a wasn't-compressed layer in storage that's based on that blob.
-    layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+    layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
     if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-        return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
+        return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
     }
     if len(layers) > 0 {
         // Save this for completeness.
-        s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
-        return true, types.BlobInfo{
-            Digest:    blobinfo.Digest,
-            Size:      layers[0].UncompressedSize,
-            MediaType: blobinfo.MediaType,
+        s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+        return true, private.ReusedBlob{
+            Digest: digest,
+            Size:   layers[0].UncompressedSize,
         }, nil
     }

     // Check if we have a was-compressed layer in storage that's based on that blob.
-    layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+    layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
     if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-        return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
+        return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
     }
     if len(layers) > 0 {
         // Record the uncompressed value so that we can use it to calculate layer IDs.
-        s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
-        return true, types.BlobInfo{
-            Digest:    blobinfo.Digest,
-            Size:      layers[0].CompressedSize,
-            MediaType: blobinfo.MediaType,
+        s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+        return true, private.ReusedBlob{
+            Digest: digest,
+            Size:   layers[0].CompressedSize,
         }, nil
     }

     // Does the blob correspond to a known DiffID which we already have available?
     // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
     // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
-    if options.CanSubstitute || blobinfo.Size != -1 {
-        if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+    if options.CanSubstitute || size != -1 {
+        if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
             layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
             if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-                return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+                return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
             }
             if len(layers) > 0 {
-                if blobinfo.Size != -1 {
-                    s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
-                    return true, blobinfo, nil
+                if size != -1 {
+                    s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+                    return true, private.ReusedBlob{
+                        Digest: digest,
+                        Size:   size,
+                    }, nil
                 }
                 if !options.CanSubstitute {
-                    return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
+                    return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
                 }
                 s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
-                return true, types.BlobInfo{
-                    Digest:    uncompressedDigest,
-                    Size:      layers[0].UncompressedSize,
-                    MediaType: blobinfo.MediaType,
+                return true, private.ReusedBlob{
+                    Digest: uncompressedDigest,
+                    Size:   layers[0].UncompressedSize,
                 }, nil
             }
         }
     }

     // Nope, we don't have it.
-    return false, types.BlobInfo{}, nil
+    return false, private.ReusedBlob{}, nil
 }

 // computeID computes a recommended image ID based on information we have so far. If
@@ -470,10 +476,10 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
     return nil, errors.New("blob not found")
 }

-// queueOrCommit queues in the specified blob to be committed to the storage.
+// queueOrCommit queues the specified layer to be committed to the storage.
 // If no other goroutine is already committing layers, the layer and all
 // subsequent layers (if already queued) will be committed to the storage.
-func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
+func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
     // NOTE: whenever the code below is touched, make sure that all code
     // paths unlock the lock and to unlock it exactly once.
     //
@@ -493,10 +499,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
     // caller is the "worker" routine committing layers. All other routines
     // can continue pulling and queuing in layers.
     s.lock.Lock()
-    s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
-        BlobInfo:   blob,
-        EmptyLayer: emptyLayer,
-    }
+    s.indexToAddedLayerInfo[index] = info

     // We're still waiting for at least one previous/parent layer to be
     // committed, so there's nothing to do.
@@ -505,10 +508,14 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
         return nil
     }

-    for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
+    for {
+        info, ok := s.indexToAddedLayerInfo[index]
+        if !ok {
+            break
+        }
         s.lock.Unlock()
         // Note: commitLayer locks on-demand.
-        if err := s.commitLayer(ctx, *info, index); err != nil {
+        if err := s.commitLayer(index, info, -1); err != nil {
             return err
         }
         s.lock.Lock()
@@ -522,13 +529,15 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
     return nil
 }

-// commitLayer commits the specified blob with the given index to the storage.
+// commitLayer commits the specified layer with the given index to the storage.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+//
 // Note that the previous layer is expected to already be committed.
 //
 // Caution: this function must be called without holding `s.lock`. Callers
 // must guarantee that, at any given time, at most one goroutine may execute
 // `commitLayer()`.
-func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
     // Already committed? Return early.
     if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
         return nil
@@ -543,7 +552,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     }

     // Carry over the previous ID for empty non-base layers.
-    if blob.EmptyLayer {
+    if info.emptyLayer {
         s.indexToStorageID[index] = &lastLayer
         return nil
     }
@@ -551,7 +560,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     // Check if there's already a layer with the ID that we'd give to the result of applying
     // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
     s.lock.Lock()
-    diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+    diffID, haveDiffID := s.blobDiffIDs[info.digest]
     s.lock.Unlock()
     if !haveDiffID {
         // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
@@ -560,18 +569,21 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
         // that relies on using a blob digest that has never been seen by the store had better call
         // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
         // so far we are going to accommodate that (if we should be doing that at all).
-        logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
-        // NOTE: use `TryReusingBlob` to prevent recursion.
-        has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+        logrus.Debugf("looking for diffID for blob %+v", info.digest)
+        // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
+        has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
+            Cache:         none.NoCache,
+            CanSubstitute: false,
+        })
         if err != nil {
-            return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
+            return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
         }
         if !has {
-            return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+            return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
         }
-        diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+        diffID, haveDiffID = s.blobDiffIDs[info.digest]
         if !haveDiffID {
-            return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+            return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
         }
     }
     id := diffID.Hex()
@@ -586,7 +598,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     }

     s.lock.Lock()
-    diffOutput, ok := s.diffOutputs[blob.Digest]
+    diffOutput, ok := s.diffOutputs[info.digest]
     s.lock.Unlock()
     if ok {
         layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
@@ -595,7 +607,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
         }

         // FIXME: what to do with the uncompressed digest?
-        diffOutput.UncompressedDigest = blob.Digest
+        diffOutput.UncompressedDigest = info.digest

         if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
             _ = s.imageRef.transport.store.Delete(layer.ID)
@@ -607,7 +619,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     }

     s.lock.Lock()
-    al, ok := s.blobAdditionalLayer[blob.Digest]
+    al, ok := s.blobAdditionalLayer[info.digest]
     s.lock.Unlock()
     if ok {
         layer, err := al.PutAs(id, lastLayer, nil)
@@ -622,7 +634,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     // Check if we previously cached a file with that blob's contents. If we didn't,
     // then we need to read the desired contents from a layer.
     s.lock.Lock()
-    filename, ok := s.filenames[blob.Digest]
+    filename, ok := s.filenames[info.digest]
     s.lock.Unlock()
     if !ok {
         // Try to find the layer with contents matching that blobsum.
@@ -631,13 +643,13 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
         if err2 == nil && len(layers) > 0 {
             layer = layers[0].ID
         } else {
-            layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+            layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
             if err2 == nil && len(layers) > 0 {
                 layer = layers[0].ID
             }
         }
         if layer == "" {
-            return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
+            return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
         }
         // Read the layer's contents.
         noCompression := archive.Uncompressed
         diffOptions := &storage.DiffOptions{
             Compression: &noCompression,
         }
         diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
         if err2 != nil {
-            return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
+            return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
         }
         // Copy the layer diff to a file. Diff() takes a lock that it holds
         // until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -670,7 +682,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
         // Make sure that we can find this file later, should we need the layer's
         // contents again.
         s.lock.Lock()
-        s.filenames[blob.Digest] = filename
+        s.filenames[info.digest] = filename
         s.lock.Unlock()
     }
     // Read the cached blob and use it as a diff.
@@ -682,11 +694,11 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
     // Build the new layer using the diff, regardless of where it came from.
     // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
     layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
-        OriginalDigest:     blob.Digest,
+        OriginalDigest:     info.digest,
         UncompressedDigest: diffID,
     }, file)
     if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-        return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
+        return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
     }

     s.indexToStorageID[index] = &layer.ID
@@ -737,7 +749,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t

     // Extract, commit, or find the layers.
     for i, blob := range layerBlobs {
-        if err := s.commitLayer(ctx, blob, i); err != nil {
+        if err := s.commitLayer(i, addedLayerInfo{
+            digest:     blob.Digest,
+            emptyLayer: blob.EmptyLayer,
+        }, blob.Size); err != nil {
             return err
         }
     }
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index a15e91de26..2f6c7f28f8 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -32,3 +32,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
 func (q *Control) GetQuota(targetPath string, quota *Quota) error {
     return errors.New("filesystem does not support, or has not enabled quotas")
 }
+
+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {}
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
index d36b25665c..be4899cb12 100644
--- a/vendor/github.com/go-openapi/strfmt/.golangci.yml
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -14,31 +14,40 @@ linters-settings:
     min-occurrences: 4

 linters:
-  enable-all: true
-  disable:
-    - maligned
-    - lll
-    - gochecknoinits
-    - gochecknoglobals
-    - godox
-    - gocognit
-    - whitespace
-    - wsl
-    - funlen
-    - wrapcheck
-    - testpackage
-    - nlreturn
-    - gofumpt
-    - goerr113
-    - gci
-    - gomnd
-    - godot
-    - exhaustivestruct
-    - paralleltest
-    - varnamelen
-    - ireturn
-    - exhaustruct
-    #- thelper
+  enable:
+    - revive
+    - goimports
+    - gosec
+    - unparam
+    - unconvert
+    - predeclared
+    - prealloc
+    - misspell
+
+  # disable:
+  #  - maligned
+  #  - lll
+  #  - gochecknoinits
+  #  - gochecknoglobals
+  #  - godox
+  #  - gocognit
+  #  - whitespace
+  #  - wsl
+  #  - funlen
+  #  - wrapcheck
+  #  - testpackage
+  #  - nlreturn
+  #  - gofumpt
+  #  - goerr113
+  #  - gci
+  #  - gomnd
+  #  - godot
+  #  - exhaustivestruct
+  #  - paralleltest
+  #  - varnamelen
+  #  - ireturn
+  #  - exhaustruct
+  #  #- thelper

 issues:
   exclude-rules:
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
index f0b310964d..3c93381c7c 100644
--- a/vendor/github.com/go-openapi/strfmt/date.go
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -57,7 +57,7 @@ func (d *Date) UnmarshalText(text []byte) error {
     if len(text) == 0 {
         return nil
     }
-    dd, err := time.Parse(RFC3339FullDate, string(text))
+    dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
     if err != nil {
         return err
     }
@@ -107,7 +107,7 @@ func (d *Date) UnmarshalJSON(data []byte) error {
     if err := json.Unmarshal(data, &strdate); err != nil {
         return err
     }
-    tt, err := time.Parse(RFC3339FullDate, strdate)
+    tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation)
     if err != nil {
         return err
     }
@@ -126,7 +126,7 @@ func (d *Date) UnmarshalBSON(data []byte) error {
     }

     if data, ok := m["data"].(string); ok {
-        rd, err := time.Parse(RFC3339FullDate, data)
+        rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
         if err != nil {
             return err
         }
diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go
index 172055d22d..ad3b3c355b 100644
--- a/vendor/github.com/go-openapi/strfmt/format.go
+++ b/vendor/github.com/go-openapi/strfmt/format.go
@@ -109,7 +109,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
         if to == tpe {
             switch v.Name {
             case "date":
-                d, err := time.Parse(RFC3339FullDate, data)
+                d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
                 if err != nil {
                     return nil, err
                 }
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
index 023676e6c9..6f5a44bb7d 100644
--- a/vendor/github.com/go-openapi/strfmt/time.go
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -29,6 +29,12 @@ import (
     "go.mongodb.org/mongo-driver/bson/bsontype"
 )

+var (
+    // UnixZero sets the zero unix timestamp we want to compare against.
+    // Unix 0 for an EST timezone is not equivalent to a UTC timezone.
+    UnixZero = time.Unix(0, 0).UTC()
+)
+
 func init() {
     dt := DateTime{}
     Default.Add("datetime", &dt, IsDateTime)
@@ -86,6 +92,9 @@ var (
     // NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC).
     // By default, the time value is not changed.
     NormalizeTimeForMarshal = func(t time.Time) time.Time { return t }
+
+    // DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants).
+    DefaultTimeLocation = time.UTC
 )

 // ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
@@ -95,7 +104,7 @@ func ParseDateTime(data string) (DateTime, error) {
     }
     var lastError error
     for _, layout := range DateTimeFormats {
-        dd, err := time.Parse(layout, data)
+        dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation)
         if err != nil {
             lastError = err
             continue
@@ -123,6 +132,16 @@ func (t DateTime) String() string {
     return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)
 }

+// IsZero returns whether the date time is a zero value
+func (t DateTime) IsZero() bool {
+    return time.Time(t).IsZero()
+}
+
+// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
+func (t DateTime) IsUnixZero() bool {
+    return time.Time(t) == UnixZero
+}
+
 // MarshalText implements the text marshaller interface
 func (t DateTime) MarshalText() ([]byte, error) {
     return []byte(t.String()), nil
diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go
index 4bd2ccd8f6..e71aff7c36 100644
--- a/vendor/github.com/go-openapi/strfmt/ulid.go
+++ b/vendor/github.com/go-openapi/strfmt/ulid.go
@@ -15,9 +15,12 @@ import (
 // ULID represents a ulid string format
 // ref:
-// https://github.com/ulid/spec
+//
+//	https://github.com/ulid/spec
+//
 // impl:
-// https://github.com/oklog/ulid
+//
+//	https://github.com/oklog/ulid
 //
 // swagger:strfmt ulid
 type ULID struct {
@@ -89,7 +92,9 @@ func NewULIDZero() ULID {
 }

 // NewULID generates a new unique ULID value and an error if any
-func NewULID() (u ULID, err error) {
+func NewULID() (ULID, error) {
+    var u ULID
+
     obj := ulidEntropyPool.Get()
     entropy, ok := obj.(io.Reader)
     if !ok {
diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
new file mode 100644
index 0000000000..0a1ff9f94d
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index 7e6f7aeee8..4f02874985 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -1,6 +1,5 @@
 # Mergo
-
 
 [![GoDoc][3]][4]
 [![GitHub release][5]][6]
 [![GoCard][7]][8]
@@ -9,6 +8,7 @@
 [![Sourcegraph][11]][12]
 [![FOSSA Status][13]][14]
 [![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
 
 [1]: https://travis-ci.org/imdario/mergo.png
 [2]: https://travis-ci.org/imdario/mergo
@@ -26,6 +26,8 @@
 [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
 [15]: https://img.shields.io/github/sponsors/imdario
 [16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
 
 A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
 
@@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
 
 ### Mergo in the wild
 
-- [cli/cli](https://github.com/cli/cli)
 - [moby/moby](https://github.com/moby/moby)
 - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
 - [vmware/dispatch](https://github.com/vmware/dispatch)
diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md
new file mode 100644
index 0000000000..a5de61f77b
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 0.3.x   | :white_check_mark: |
+| < 0.3   | :x:                |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
index a13a7ee46c..b50d5c2a4e 100644
--- a/vendor/github.com/imdario/mergo/map.go
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 		}
 	}
 	// Remember, remember...
-	visited[h] = &visit{addr, typ, seen}
+	visited[h] = &visit{typ, seen, addr}
 	}
 	zeroValue := reflect.Value{}
 	switch dst.Kind() {
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 			}
 			fieldName := field.Name
 			fieldName = changeInitialCase(fieldName, unicode.ToLower)
-			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
 				dstMap[fieldName] = src.Field(i).Interface()
 			}
 		}
@@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
 
 func _map(dst, src interface{}, opts ...func(*Config)) error {
 	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
-		return ErrNonPointerAgument
+		return ErrNonPointerArgument
 	}
 	var (
 		vDst, vSrc reflect.Value
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 8b4e2f47a0..0ef9b2138c 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool {
 }
 
 type Config struct {
+	Transformers                 Transformers
 	Overwrite                    bool
+	ShouldNotDereference         bool
 	AppendSlice                  bool
 	TypeCheck                    bool
-	Transformers                 Transformers
 	overwriteWithEmptyValue      bool
 	overwriteSliceWithEmptyValue bool
 	sliceDeepCopy                bool
@@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 		}
 	}
 	// Remember, remember...
-	visited[h] = &visit{addr, typ, seen}
+	visited[h] = &visit{typ, seen, addr}
 	}
 
 	if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
@@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 		}
 	} else {
-		if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
+		if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
 			dst.Set(src)
 		}
 	}
@@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 	}
 
 	if src.Kind() != reflect.Map {
-		if overwrite {
+		if overwrite && dst.CanSet() {
 			dst.Set(src)
 		}
 		return
@@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 					dstSlice = reflect.ValueOf(dstElement.Interface())
 				}
 
-				if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+				if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
 					if typeCheck && srcSlice.Type() != dstSlice.Type() {
 						return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
 					}
@@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 						dst.SetMapIndex(key, dstSlice)
 					}
 				}
-				if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
-					continue
+
+				if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+					if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+						continue
+					}
+					if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+						continue
+					}
 				}
-				if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+
+				if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
 					if dst.IsNil() {
 						dst.Set(reflect.MakeMap(dst.Type()))
 					}
 					dst.SetMapIndex(key, srcElement)
 				}
 			}
+
+			// Ensure that all keys in dst are deleted if they are not in src.
+			if overwriteWithEmptySrc {
+				for _, key := range dst.MapKeys() {
+					srcElement := src.MapIndex(key)
+					if !srcElement.IsValid() {
+						dst.SetMapIndex(key, reflect.Value{})
+					}
+				}
+			}
 		case reflect.Slice:
 			if !dst.CanSet() {
 				break
 			}
-			if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+			if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
 				dst.Set(src)
 			} else if config.AppendSlice {
 				if src.Type() != dst.Type() {
@@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 
 		if src.Kind() != reflect.Interface {
 			if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
-				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
 					dst.Set(src)
 				}
 			} else if src.Kind() == reflect.Ptr {
-				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
-					return
+				if !config.ShouldNotDereference {
+					if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+						return
+					}
+				} else {
+					if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+						dst.Set(src)
+					}
+				}
 			} else if dst.Elem().Type() == src.Type() {
 				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
 					return
@@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 		}
 
 		if dst.IsNil() || overwrite {
-			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
 				dst.Set(src)
 			}
 			break
@@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			break
 		}
 	default:
-		mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+		mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
 		if mustSet {
 			if dst.CanSet() {
 				dst.Set(src)
 			}
@@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) {
 	config.overwriteSliceWithEmptyValue = true
 }
 
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+	config.ShouldNotDereference = true
+}
+
 // WithAppendSlice will make merge append slices instead of overwriting them.
 func WithAppendSlice(config *Config) {
 	config.AppendSlice = true
@@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) {
 
 func merge(dst, src interface{}, opts ...func(*Config)) error {
 	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
-		return ErrNonPointerAgument
+		return ErrNonPointerArgument
 	}
 	var (
 		vDst, vSrc reflect.Value
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
index 9fe362d476..0a721e2d85 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -20,7 +20,7 @@ var (
 	ErrNotSupported                = errors.New("only structs, maps, and slices are supported")
 	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
 	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
-	ErrNonPointerAgument           = errors.New("dst must be a pointer")
+	ErrNonPointerArgument          = errors.New("dst must be a pointer")
 )
 
 // During deepMerge, must keep track of checks that are
@@ -28,13 +28,13 @@ var (
 // checks in progress are true when it reencounters them.
 // Visited are stored in a map indexed by 17 * a1 + a2;
 type visit struct {
-	ptr  uintptr
 	typ  reflect.Type
 	next *visit
+	ptr  uintptr
 }
 
 // From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
 	switch v.Kind() {
 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
 		return v.Len() == 0
@@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
 		if v.IsNil() {
 			return true
 		}
-		return isEmptyValue(v.Elem())
+		if shouldDereference {
+			return isEmptyValue(v.Elem(), shouldDereference)
+		}
+		return false
 	case reflect.Func:
 		return v.IsNil()
 	case reflect.Invalid:
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
index 4d6cbb64d7..2c15eeab74 100644
--- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -251,7 +251,7 @@ func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
 // By default, the image ID is set to a randomly generated value. To override this, consider using
 // OptCreateDeterministic or OptCreateWithID.
 //
-// By default, the image creation time is set to time.Now(). To override this, consider using
+// By default, the image creation time is set to the current time. To override this, consider using
 // OptCreateDeterministic or OptCreateWithTime.
 //
 // By default, the image will support a maximum of 48 descriptors. To change this, consider using
@@ -296,7 +296,7 @@ func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) {
 // By default, the image ID is set to a randomly generated value. To override this, consider using
 // OptCreateDeterministic or OptCreateWithID.
 //
-// By default, the image creation time is set to time.Now(). To override this, consider using
+// By default, the image creation time is set to the current time. To override this, consider using
 // OptCreateDeterministic or OptCreateWithTime.
 //
 // By default, the image will support a maximum of 48 descriptors. To change this, consider using
@@ -393,11 +393,13 @@ func OptAddWithTime(t time.Time) AddOpt {
 
 // AddObject adds a new data object and its descriptor into the specified SIF file.
 //
-// By default, the image modification time is set to the current time. To override this, consider
-// using OptAddDeterministic or OptAddWithTime.
+// By default, the image modification time is set to the current time for non-deterministic images,
+// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime.
 func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
-	ao := addOpts{
-		t: time.Now(),
+	ao := addOpts{}
+
+	if !f.isDeterministic() {
+		ao.t = time.Now()
 	}
 
 	for _, opt := range opts {
@@ -449,11 +451,7 @@ func (f *FileImage) isLast(d *rawDescriptor) bool {
 func (f *FileImage) truncateAt(d *rawDescriptor) error {
 	start := d.Offset + d.Size - d.SizeWithPadding
 
-	if err := f.rw.Truncate(start); err != nil {
-		return err
-	}
-
-	return nil
+	return f.rw.Truncate(start)
 }
 
 // deleteOpts accumulates object deletion options.
@@ -506,11 +504,14 @@ var errCompactNotImplemented = errors.New("compact not implemented for non-last
 // To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
 // object deletion, use OptDeleteCompact.
 //
-// By default, the image modification time is set to time.Now(). To override this, consider using
-// OptDeleteDeterministic or OptDeleteWithTime.
+// By default, the image modification time is set to the current time for non-deterministic images,
+// and unset otherwise. To override this, consider using OptDeleteDeterministic or
+// OptDeleteWithTime.
 func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
-	do := deleteOpts{
-		t: time.Now(),
+	do := deleteOpts{}
+
+	if !f.isDeterministic() {
+		do.t = time.Now()
 	}
 
 	for _, opt := range opts {
@@ -596,11 +597,14 @@ var (
 
 // SetPrimPart sets the specified system partition to be the primary one.
 //
-// By default, the image/object modification times are set to time.Now(). To override this,
-// consider using OptSetDeterministic or OptSetWithTime.
+// By default, the image/object modification times are set to the current time for
+// non-deterministic images, and unset otherwise. To override this, consider using
+// OptSetDeterministic or OptSetWithTime.
 func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
-	so := setOpts{
-		t: time.Now(),
+	so := setOpts{}
+
+	if !f.isDeterministic() {
+		so.t = time.Now()
 	}
 
 	for _, opt := range opts {
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
index 2d1c2091dc..74ff10078b 100644
--- a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
+// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved.
 // Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
 // Copyright (c) 2017, Yannick Cote <yhcote@gmail.com>. All rights reserved.
 // This software is licensed under a 3-clause BSD license. Please consult the
@@ -402,3 +402,9 @@ func (f *FileImage) DataSize() int64 { return f.h.DataSize }
 func (f *FileImage) GetHeaderIntegrityReader() io.Reader {
 	return f.h.GetIntegrityReader()
 }
+
+// isDeterministic returns true if the UUID and timestamps in the header of f are set to
+// deterministic values.
+func (f *FileImage) isDeterministic() bool {
+	return f.h.ID == uuid.Nil && f.CreatedAt().IsZero() && f.ModifiedAt().IsZero()
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 390c253c7d..f399eef4e0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -123,7 +123,7 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c
+# github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b
 ## explicit; go 1.18
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
@@ -177,7 +177,7 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a
+# github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8
 ## explicit; go 1.18
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -284,7 +284,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927
+# github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b
 ## explicit; go 1.17
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -486,7 +486,7 @@ github.com/go-openapi/runtime/yamlpc
 # github.com/go-openapi/spec v0.20.7
 ## explicit; go 1.13
 github.com/go-openapi/spec
-# github.com/go-openapi/strfmt v0.21.3
+# github.com/go-openapi/strfmt v0.21.5
 ## explicit; go 1.13
 github.com/go-openapi/strfmt
 # github.com/go-openapi/swag v0.22.3
@@ -573,7 +573,7 @@ github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-retryablehttp v0.7.2
 ## explicit; go 1.13
 github.com/hashicorp/go-retryablehttp
-# github.com/imdario/mergo v0.3.13
+# github.com/imdario/mergo v0.3.15
 ## explicit; go 1.13
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.1
@@ -841,7 +841,7 @@ github.com/stefanberger/go-pkcs11uri
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.11.0
+# github.com/sylabs/sif/v2 v2.11.1
 ## explicit; go 1.19
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635

From 7f8d5e56545b380474de3789051071ac82c0e06a Mon Sep 17 00:00:00 2001
From: Valentin Rothberg <vrothberg@redhat.com>
Date: Wed, 29 Mar 2023 14:05:16 +0200
Subject: [PATCH 2/2] speed up image listing

As found in #17828, image listing does not scale well with a growing
number of local images. Make use of recent improvements in libimage
that allow for computing the dangling and parent data with _one_ layer
tree. Prior, the layer tree had to be recomputed _twice_ for each
image.

[NO NEW TESTS NEEDED] as it's a non-functional performance change.
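
Roughly, the listing loop changes shape as follows (an illustrative
sketch, not the literal code; the identifiers match the diff below,
but the surrounding calls are abbreviated):

    // Before: both helpers walked a freshly computed layer tree,
    // so the tree was rebuilt twice per image.
    isDangling, err := img.IsDangling(ctx)
    parent, err := img.Parent(ctx)

    // After: ask libimage to precompute the data once for all images...
    options := &libimage.ListImagesOptions{SetListData: true}
    // ...and read the cached results when building each summary.
    isDangling := *img.ListData.IsDangling
    parent := img.ListData.Parent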

Fixes: #17828

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
---
 pkg/domain/infra/abi/images_list.go | 24 ++++++++++++------------
 test/system/330-corrupt-images.bats |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
index 4788ecef92..d9661721a3 100644
--- a/pkg/domain/infra/abi/images_list.go
+++ b/pkg/domain/infra/abi/images_list.go
@@ -5,12 +5,14 @@ import (
 	"fmt"
 
 	"github.com/containers/common/libimage"
+	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/pkg/domain/entities"
 )
 
 func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
 	listImagesOptions := &libimage.ListImagesOptions{
-		Filters: opts.Filter,
+		Filters:     opts.Filter,
+		SetListData: true,
 	}
 	if !opts.All {
 		// Filter intermediate images unless we want to list *all*.
@@ -30,9 +32,14 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
 		if err != nil {
 			return nil, fmt.Errorf("getting repoDigests from image %q: %w", img.ID(), err)
 		}
-		isDangling, err := img.IsDangling(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("checking if image %q is dangling: %w", img.ID(), err)
+
+		if img.ListData.IsDangling == nil { // Sanity check
+			return nil, fmt.Errorf("%w: ListData.IsDangling is nil but should not", define.ErrInternal)
+		}
+		isDangling := *img.ListData.IsDangling
+		parentID := ""
+		if img.ListData.Parent != nil {
+			parentID = img.ListData.Parent.ID()
 		}
 
 		e := entities.ImageSummary{
@@ -46,6 +53,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
 			ReadOnly:    img.IsReadOnly(),
 			SharedSize:  0,
 			RepoTags:    img.Names(), // may include tags and digests
+			ParentId:    parentID,
 		}
 		e.Labels, err = img.Labels(ctx)
 		if err != nil {
@@ -67,14 +75,6 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
 		// replaced later with correct calculation logic
 		e.VirtualSize = sz
 
-		parent, err := img.Parent(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("retrieving parent of image %q: you may need to remove the image to resolve the error: %w", img.ID(), err)
-		}
-		if parent != nil {
-			e.ParentId = parent.ID()
-		}
-
 		summaries = append(summaries, &e)
 	}
 	return summaries, nil
diff --git a/test/system/330-corrupt-images.bats b/test/system/330-corrupt-images.bats
index 2f0fd753c7..6779018256 100644
--- a/test/system/330-corrupt-images.bats
+++ b/test/system/330-corrupt-images.bats
@@ -74,7 +74,7 @@ function _corrupt_image_test() {
     # Corruptify, and confirm that 'podman images' throws an error
     rm -v ${PODMAN_CORRUPT_TEST_WORKDIR}/root/*-images/$id/${rm_path}
     run_podman 125 images
-    is "$output" "Error: retrieving label for image \"$id\": you may need to remove the image to resolve the error.*"
+    is "$output" "Error: locating item named \".*\" for image with ID \"$id\" (consider removing the image to resolve the issue): file does not exist.*"
 
     # Run the requested command. Confirm it succeeds, with suitable warnings
     run_podman $*