vendor containers/common@e27c30ee9b
Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>

go.mod (12 lines changed)

@@ -12,13 +12,13 @@ require (
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.2.0
 	github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7
-	github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c
+	github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a
+	github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8
 	github.com/containers/libhvee v0.0.2
 	github.com/containers/ocicrypt v1.1.7
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927
+	github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.4.1
 	github.com/cyphar/filepath-securejoin v0.2.3
@@ -103,7 +103,7 @@ require (
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.24.1 // indirect
 	github.com/go-openapi/spec v0.20.7 // indirect
-	github.com/go-openapi/strfmt v0.21.3 // indirect
+	github.com/go-openapi/strfmt v0.21.5 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
 	github.com/go-openapi/validate v0.22.0 // indirect
 	github.com/go-playground/locales v0.14.0 // indirect
@@ -119,7 +119,7 @@ require (
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -155,7 +155,7 @@ require (
 	github.com/sigstore/sigstore v1.6.0 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.11.0 // indirect
+	github.com/sylabs/sif/v2 v2.11.1 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/theupdateframework/go-tuf v0.5.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect

go.sum (23 lines changed)

@@ -247,12 +247,12 @@ github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP
 github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
 github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7 h1:GmQhTfsGuYgGfuYWEF4Ed+rEvlSWRmxisLBL2J8rCb4=
 github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7/go.mod h1:sFvOi+WMtMtrkxx1Dn8EhF5/ddXNyC1f5LAj4ZGzjAs=
-github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c h1:j/52772OnuMHg3B2sgMM038S6C/uAJ8cXj9l4jNOjvo=
-github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c/go.mod h1:RyY5B1E+PsFnZOW28xgFkjce0oCAMN7c/zskaCYmAkQ=
+github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b h1:1QbWSASZc7C9Oi9lO+cQdBlwQ11ofmPAgqu3h3HKyWA=
+github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b/go.mod h1:YyHQ+bAH0sv2K6q49qiuyrA8W8RMiBhUq3MtYs8fbfk=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a h1:2xIif78r5x2nmdb5uhjXBZuexiDAt1c/XIXFxFhfKSk=
-github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a/go.mod h1:9PM/hiCVTh6dt8Swi7eYKXKHIaPabHn8gtFV2YD44Mk=
+github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8 h1:hL/KrmP4ZMRmokrz+YNBem1ECKtytLU4/kUn3mSSkz0=
+github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8/go.mod h1:pquu2CUlF4i+OBB5MM6kb36ZkYGQze8Wqv91aYIe9eo=
 github.com/containers/libhvee v0.0.2 h1:eWtbOvpT8bD9jvksMES2yXUmEpcE0zENWkci+bbP7U8=
 github.com/containers/libhvee v0.0.2/go.mod h1:bV1MfbuXk/ZLWHiWZpm8aePOR6iJGD1q55guYhH4CnA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -266,8 +266,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927 h1:VGSwgqH/hBZqlWR48MFNrpT4meMzj+fVg6SYM2uSWWA=
-github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927/go.mod h1:tNwkJMFiChoEURP+ofq34pGRysOoFk/QCVrdmS1EzPI=
+github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b h1:ip+OrrC/fT7iqIuNHXmfxq7Xc5myNesE/e99qu8p030=
+github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b/go.mod h1:oZ5Sscs6ALLhT3j2qTZAtD5Mi8B3G6+PFa54raz7pF4=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -432,8 +432,9 @@ github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/strfmt v0.21.5 h1:Z/algjpXIZpbvdN+6KbVTkpO75RuedMrqpn1GN529h4=
+github.com/go-openapi/strfmt v0.21.5/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@@ -628,8 +629,8 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
 github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
+github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
 github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -973,8 +974,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/sylabs/sif/v2 v2.11.0 h1:s1oEFZCb1TX22zT3Twb4tY0X8CVfpo9IEZfhgZzCP+4=
-github.com/sylabs/sif/v2 v2.11.0/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ=
+github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY=
+github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=

vendor/github.com/containers/common/libimage/disk_usage.go (generated, vendored; 4 lines changed)

@@ -29,12 +29,12 @@ type ImageDiskUsage struct {
 // storage. Note that a single image may yield multiple usage reports, one for
 // each repository tag.
 func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error) {
-	layerTree, err := r.layerTree()
+	images, err := r.ListImages(ctx, nil, nil)
 	if err != nil {
 		return nil, -1, err
 	}
 
-	images, err := r.ListImages(ctx, nil, nil)
+	layerTree, err := r.layerTree(images)
 	if err != nil {
 		return nil, -1, err
 	}

vendor/github.com/containers/common/libimage/filters.go (generated, vendored; 2 lines changed)

@@ -81,7 +81,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
 	var tree *layerTree
 	getTree := func() (*layerTree, error) {
 		if tree == nil {
-			t, err := r.layerTree()
+			t, err := r.layerTree(nil)
 			if err != nil {
 				return nil, err
 			}

vendor/github.com/containers/common/libimage/history.go (generated, vendored; 2 lines changed)

@@ -24,7 +24,7 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) {
 		return nil, err
 	}
 
-	layerTree, err := i.runtime.layerTree()
+	layerTree, err := i.runtime.layerTree(nil)
 	if err != nil {
 		return nil, err
 	}

vendor/github.com/containers/common/libimage/image.go (generated, vendored; 19 lines changed)

@@ -23,6 +23,17 @@ import (
 // Image represents an image in the containers storage and allows for further
 // operations and data manipulation.
 type Image struct {
+	// ListData that is being set by (*Runtime).ListImages(). Note that
+	// the data may be outdated.
+	ListData struct {
+		// Dangling indicates if the image is dangling. Use
+		// `IsDangling()` to compute the latest state.
+		IsDangling *bool
+		// Parent points to the parent image. Use `Parent()` to
+		// compute the latest state.
+		Parent *Image
+	}
+
 	// Backwards pointer to the runtime.
 	runtime *Runtime
 
@@ -216,10 +227,14 @@ func (i *Image) TopLayer() string {
 
 // Parent returns the parent image or nil if there is none
 func (i *Image) Parent(ctx context.Context) (*Image, error) {
-	tree, err := i.runtime.layerTree()
+	tree, err := i.runtime.layerTree(nil)
 	if err != nil {
 		return nil, err
 	}
+	return i.parent(ctx, tree)
+}
+
+func (i *Image) parent(ctx context.Context, tree *layerTree) (*Image, error) {
 	return tree.parent(ctx, i)
 }
 
@@ -246,7 +261,7 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) {
 // created for this invocation only.
 func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) {
 	if tree == nil {
-		t, err := i.runtime.layerTree()
+		t, err := i.runtime.layerTree(nil)
 		if err != nil {
 			return nil, err
 		}

vendor/github.com/containers/common/libimage/image_tree.go (generated, vendored; 2 lines changed)

@@ -35,7 +35,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) {
 		fmt.Fprintf(sb, "No Image Layers")
 	}
 
-	layerTree, err := i.runtime.layerTree()
+	layerTree, err := i.runtime.layerTree(nil)
 	if err != nil {
 		return "", err
 	}

vendor/github.com/containers/common/libimage/layer_tree.go (generated, vendored; 10 lines changed)

@@ -75,15 +75,17 @@ func (l *layerNode) repoTags() ([]string, error) {
 
 // layerTree extracts a layerTree from the layers in the local storage and
 // relates them to the specified images.
-func (r *Runtime) layerTree() (*layerTree, error) {
+func (r *Runtime) layerTree(images []*Image) (*layerTree, error) {
 	layers, err := r.store.Layers()
 	if err != nil {
 		return nil, err
 	}
 
-	images, err := r.ListImages(context.Background(), nil, nil)
-	if err != nil {
-		return nil, err
+	if images == nil {
+		images, err = r.ListImages(context.Background(), nil, nil)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	tree := layerTree{

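The signature change above is the heart of this vendoring: layerTree() used to call r.ListImages() internally on every invocation, so call sites that already held an image list paid for a second full listing. The new layerTree(images) parameter lets such callers hand their list in, while nil keeps the old fetch-it-yourself behavior. A minimal standalone sketch of that optional-input pattern (hypothetical names; not the vendored code):

package main

import "fmt"

type image struct{ id string }

// listImages stands in for the expensive Runtime.ListImages call.
func listImages() []*image {
	return []*image{{"sha256:aaa"}, {"sha256:bbb"}}
}

// buildTree mirrors the new layerTree(images) shape: nil means
// "list the images yourself", anything else is reused as-is.
func buildTree(images []*image) []*image {
	if images == nil {
		images = listImages()
	}
	return images
}

func main() {
	imgs := listImages()
	// The caller lists once and shares the result across helpers,
	// instead of each helper re-listing internally.
	tree := buildTree(imgs)
	fmt.Println(len(tree))
}
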
vendor/github.com/containers/common/libimage/runtime.go (generated, vendored; 38 lines changed)

@@ -537,6 +537,8 @@ type ListImagesOptions struct {
 	// used). The definition of an external container can be set by
 	// callers.
 	IsExternalContainerFunc IsExternalContainerFunc
+	// SetListData will populate the Image.ListData fields of returned images.
+	SetListData bool
 }
 
 // ListImages lists images in the local container storage. If names are
@@ -565,7 +567,41 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI
 		}
 	}
 
-	return r.filterImages(ctx, images, options)
+	filtered, err := r.filterImages(ctx, images, options)
+	if err != nil {
+		return nil, err
+	}
+
+	if !options.SetListData {
+		return filtered, nil
+	}
+
+	// If explicitly requested by the user, pre-compute and cache the
+	// dangling and parent information of all filtered images. That will
+	// considerably speed things up for callers who need this information
+	// as the layer tree will computed once for all instead of once for
+	// each individual image (see containers/podman/issues/17828).
+
+	tree, err := r.layerTree(images)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range filtered {
+		isDangling, err := filtered[i].isDangling(ctx, tree)
+		if err != nil {
+			return nil, err
+		}
+		filtered[i].ListData.IsDangling = &isDangling
+
+		parent, err := filtered[i].parent(ctx, tree)
+		if err != nil {
+			return nil, err
+		}
+		filtered[i].ListData.Parent = parent
+	}
+
+	return filtered, nil
 }
 
 // RemoveImagesOptions allow for customizing image removal.

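For callers, the new option means dangling/parent lookups over a large image list no longer rebuild the layer tree once per image. A hedged usage sketch of the option (assumes an already-initialized *libimage.Runtime; not part of the vendored code):

package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// printDangling shows how a caller might opt into the pre-computed
// ListData fields introduced by this change.
func printDangling(ctx context.Context, rt *libimage.Runtime) error {
	images, err := rt.ListImages(ctx, nil, &libimage.ListImagesOptions{SetListData: true})
	if err != nil {
		return err
	}
	for _, img := range images {
		// ListData was filled from one shared layer tree; it can go
		// stale if the storage changes afterwards.
		if d := img.ListData.IsDangling; d != nil && *d {
			fmt.Println("dangling image:", img.ID())
		}
	}
	return nil
}
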
vendor/github.com/containers/common/pkg/netns/netns_linux.go (generated, vendored; 38 lines changed)

@@ -20,6 +20,7 @@ package netns
 
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"os"
 	"path"
@@ -51,13 +52,24 @@ func GetNSRunDir() (string, error) {
 // NewNS creates a new persistent (bind-mounted) network namespace and returns
 // an object representing that namespace, without switching to it.
 func NewNS() (ns.NetNS, error) {
-	b := make([]byte, 16)
-	_, err := rand.Reader.Read(b)
-	if err != nil {
-		return nil, fmt.Errorf("failed to generate random netns name: %v", err)
+	for i := 0; i < 10000; i++ {
+		b := make([]byte, 16)
+		_, err := rand.Reader.Read(b)
+		if err != nil {
+			return nil, fmt.Errorf("failed to generate random netns name: %v", err)
+		}
+		nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
+		ns, err := NewNSWithName(nsName)
+		if err == nil {
+			return ns, nil
+		}
+		// retry when the name already exists
+		if errors.Is(err, os.ErrExist) {
+			continue
+		}
+		return nil, err
 	}
-	nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
-	return NewNSWithName(nsName)
+	return nil, errors.New("failed to find free netns path name")
 }
 
 // NewNSWithName creates a new persistent (bind-mounted) network namespace and returns
@@ -101,7 +113,7 @@ func NewNSWithName(name string) (ns.NetNS, error) {
 
 	// create an empty file at the mount point
 	nsPath := path.Join(nsRunDir, name)
-	mountPointFd, err := os.Create(nsPath)
+	mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
 	if err != nil {
 		return nil, err
 	}
@@ -149,18 +161,6 @@ func NewNSWithName(name string) (ns.NetNS, error) {
 		return
 	}
 
-	// Put this thread back to the orig ns, since it might get reused (pre go1.10)
-	defer func() {
-		if err := origNS.Set(); err != nil {
-			if unshare.IsRootless() && strings.Contains(err.Error(), "operation not permitted") {
-				// When running in rootless mode it will fail to re-join
-				// the network namespace owned by root on the host.
-				return
-			}
-			logrus.Warnf("Unable to reset namespace: %q", err)
-		}
-	}()
-
 	// bind mount the netns from the current thread (from /proc) onto the
 	// mount point. This causes the namespace to persist, even when there
 	// are no threads in the ns. Make this a shared mount; it needs to be
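Two fixes travel together here. First, os.Create would silently truncate an existing mount-point file if another process had picked the same random name; os.OpenFile with O_CREATE|O_EXCL makes the collision visible as os.ErrExist so NewNS can simply retry with a fresh name. Second, the removed origNS.Set() defer was a workaround for pre-Go-1.10 thread reuse (as its own comment noted) and is dropped. The same exclusive-create retry pattern in isolation (a standalone sketch, not the vendored code):

package main

import (
	"crypto/rand"
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// createUniqueFile retries random names until O_EXCL succeeds, so a
// name collision surfaces as os.ErrExist instead of clobbering the
// existing file.
func createUniqueFile(dir string) (*os.File, error) {
	for i := 0; i < 10000; i++ {
		b := make([]byte, 16)
		if _, err := rand.Read(b); err != nil {
			return nil, fmt.Errorf("generating random name: %w", err)
		}
		name := filepath.Join(dir, fmt.Sprintf("netns-%x", b))
		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
		if err == nil {
			return f, nil
		}
		if errors.Is(err, os.ErrExist) {
			continue // name already taken; try another
		}
		return nil, err
	}
	return nil, errors.New("failed to find a free file name")
}

func main() {
	f, err := createUniqueFile(os.TempDir())
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println("created", f.Name())
}
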
vendor/github.com/containers/image/v5/copy/blob.go (generated, vendored; 22 lines changed)

@@ -104,12 +104,11 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
 	if !isConfig {
 		options.LayerIndex = &layerIndex
 	}
-	uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+	destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
 	if err != nil {
 		return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
 	}
-	uploadedInfo.Annotations = stream.info.Annotations
+	uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)
 
 	compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
 	decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
@@ -169,3 +168,20 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
 	}
 	return n, err
 }
+
+// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
+	// The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
+	// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+	// of the generic code in this package.
+	return types.BlobInfo{
+		Digest:               uploadedBlob.Digest,
+		Size:                 uploadedBlob.Size,
+		URLs:                 nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+		Annotations:          inputInfo.Annotations,
+		MediaType:            inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
+		CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+		CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
+		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+	}
+}

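The new helper narrows what a transport reports after an upload to the fields it actually knows (digest and size, via private.UploadedBlob) and lets the generic copy code rebuild the full types.BlobInfo from the caller's input. A simplified, self-contained sketch of that reconstruction (local stand-in types, not the real containers/image ones):

package main

import "fmt"

// uploadedBlob mirrors the shape of private.UploadedBlob: only what the
// transport learned while writing the blob.
type uploadedBlob struct {
	Digest string
	Size   int64
}

type blobInfo struct {
	Digest      string
	Size        int64
	MediaType   string
	Annotations map[string]string
}

// fromUpload rebuilds the caller-facing info: digest/size come from the
// transport, everything else is carried over from the input.
func fromUpload(in blobInfo, up uploadedBlob) blobInfo {
	return blobInfo{
		Digest:      up.Digest,
		Size:        up.Size,
		MediaType:   in.MediaType,
		Annotations: in.Annotations,
	}
}

func main() {
	in := blobInfo{MediaType: "application/octet-stream", Size: -1}
	out := fromUpload(in, uploadedBlob{Digest: "sha256:abc", Size: 42})
	fmt.Printf("%+v\n", out)
}
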
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 48 lines changed)

@@ -621,7 +621,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
 	// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
 	// the ImageDestination interface lets us pass in.
-	reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+	reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
 		Cache:         ic.c.blobInfoCache,
 		CanSubstitute: canSubstitute,
 		EmptyLayer:    emptyLayer,
@@ -634,7 +634,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	if reused {
 		logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
 		func() { // A scope for defer
-			bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
+			bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
 			defer bar.Abort(false)
 			bar.mark100PercentComplete()
 		}()
@@ -647,19 +647,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 			}
 		}
 
-		// If the reused blob has the same digest as the one we asked for, but
-		// the transport didn't/couldn't supply compression info, fill it in based
-		// on what we know from the srcInfos we were given.
-		// If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will
-		// call UpdateLayerInfos(), which uses this information to compute the
-		// MediaType value for the updated layer infos, and it the transport
-		// didn't pass the information along from its input to its output, then
-		// it can derive the MediaType incorrectly.
-		if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil {
-			blobInfo.CompressionOperation = srcInfo.CompressionOperation
-			blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm
-		}
-		return blobInfo, cachedDiffID, nil
+		return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
 	}
 }
 
@@ -679,7 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 			wrapped: ic.c.rawSource,
 			bar:     bar,
 		}
-		info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
+		uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
 		if err == nil {
 			if srcInfo.Size != -1 {
 				bar.SetRefill(srcInfo.Size - bar.Current())
@@ -687,7 +675,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 			bar.mark100PercentComplete()
 			hideProgressBar = false
 			logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
-			return true, info
+			return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
 		}
 		logrus.Debugf("Failed to retrieve partial blob: %v", err)
 		return false, types.BlobInfo{}
@@ -742,6 +730,32 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	}()
 }
 
+// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
+func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
+	// The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
+	// compression format if the blob was substituted.
+	// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+	// of the generic code in this package.
+	res := types.BlobInfo{
+		Digest:               reusedBlob.Digest,
+		Size:                 reusedBlob.Size,
+		URLs:                 nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+		Annotations:          inputInfo.Annotations,
+		MediaType:            inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+		CompressionOperation: reusedBlob.CompressionOperation,
+		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
+	}
+	// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+	// if the blob was substituted; otherwise, fill it in based
+	// on what we know from the srcInfos we were given.
+	if reusedBlob.Digest == inputInfo.Digest {
+		res.CompressionOperation = inputInfo.CompressionOperation
+		res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+	}
+	return res
+}
+
 // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
 // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
 // perhaps (de/re/)compressing the stream,
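updatedBlobInfoFromReuse is the counterpart for the reuse path: a transport that substituted a differently-compressed blob reports the new compression fields, and only when the digest is unchanged does the generic code fall back to the caller's compression info. A simplified sketch of that decision (stand-in types again, not the real API):

package main

import "fmt"

type reusedBlob struct {
	Digest          string
	CompressionAlgo string // set only when the transport substituted a blob
}

type blobInfo struct {
	Digest          string
	CompressionAlgo string
}

// fromReuse trusts the transport's compression fields only for a
// substituted (different-digest) blob; otherwise the input's values win.
func fromReuse(in blobInfo, r reusedBlob) blobInfo {
	res := blobInfo{Digest: r.Digest, CompressionAlgo: r.CompressionAlgo}
	if r.Digest == in.Digest {
		res.CompressionAlgo = in.CompressionAlgo
	}
	return res
}

func main() {
	in := blobInfo{Digest: "sha256:aaa", CompressionAlgo: "gzip"}
	// Same digest: the input's gzip is kept.
	fmt.Println(fromReuse(in, reusedBlob{Digest: "sha256:aaa"}))
	// Substituted blob: the transport's zstd wins.
	fmt.Println(fromReuse(in, reusedBlob{Digest: "sha256:bbb", CompressionAlgo: "zstd"}))
}
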
vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored; 32 lines changed)

@@ -132,11 +132,11 @@ func (d *dirImageDestination) Close() error {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	succeeded := false
 	explicitClosed := false
@@ -153,14 +153,14 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	size, err := io.Copy(blobFile, stream)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	blobDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 
 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -169,7 +169,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 	}
 
@@ -178,32 +178,30 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	blobFile.Close()
 	explicitClosed = true
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	succeeded = true
-	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	if info.Digest == "" {
-		return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
+		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
 	}
 	blobPath := d.ref.layerPath(info.Digest)
 	finfo, err := os.Stat(blobPath)
 	if err != nil && os.IsNotExist(err) {
-		return false, types.BlobInfo{}, nil
+		return false, private.ReusedBlob{}, nil
 	}
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
-	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+	return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
 }
 
 // PutManifest writes manifest to the destination.

vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored; 50 lines changed)

@@ -132,8 +132,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
 	// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
 	// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
@@ -141,7 +141,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
 		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
 		if err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 		defer cleanup()
 		stream = streamCopy
@@ -152,10 +152,10 @@
 		// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
 		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
 		if err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 		if haveBlob {
-			return reusedInfo, nil
+			return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
 		}
 	}
 
@@ -164,16 +164,16 @@
 	logrus.Debugf("Uploading %s", uploadPath)
 	res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
+		return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
+		return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
 	}
 
 	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
@@ -201,7 +201,7 @@
 		return uploadLocation, nil
 	}()
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	blobDigest := digester.Digest()
 
@@ -212,17 +212,17 @@
 	uploadLocation.RawQuery = locationQuery.Encode()
 	res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
+		return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
 	}
 
 	logrus.Debugf("Upload of layer %s complete", blobDigest)
 	options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
-	return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
+	return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
 }
 
 // blobExists returns true iff repo contains a blob with digest, and if so, also its size.
@@ -299,34 +299,32 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 // tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
 // blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
 // The caller must ensure info.Digest is set.
-func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
 	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
 	if exists {
 		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+		return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
	}
-	return false, types.BlobInfo{}, nil
+	return false, private.ReusedBlob{}, nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 
 	// First, check whether the blob happens to already exist at the destination.
 	haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
 	if haveBlob {
 		return true, reusedInfo, nil
@@ -396,10 +394,14 @@
 			continue
 		}
 
-		return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
+		return true, private.ReusedBlob{
+			Digest:               candidate.Digest,
+			Size:                 size,
+			CompressionOperation: compressionOperation,
+			CompressionAlgorithm: compressionAlgorithm}, nil
 	}
 
-	return false, types.BlobInfo{}, nil
+	return false, private.ReusedBlob{}, nil
 }
 
 // PutManifest writes manifest to the destination.

vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored; 28 lines changed)
@@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	// Ouch, we need to stream the blob into a temporary file just to determine the size.
 	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
 	if inputInfo.Size == -1 || inputInfo.Digest == "" {
 		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
 		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
 		if err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 		defer cleanup()
 		stream = streamCopy
@@ -92,47 +92,45 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
 	}

 	if err := d.archive.lock(); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	defer d.archive.unlock()

 	// Maybe the blob has been already sent
 	ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	if ok {
-		return reusedInfo, nil
+		return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
 	}

 	if options.IsConfig {
 		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 		if err != nil {
-			return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
+			return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
 		}
 		d.config = buf
 		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
-			return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
+			return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
 		}
 	} else {
 		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 	}
 	d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
-	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+	return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	if err := d.archive.lock(); err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
 	defer d.archive.unlock()
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go (generated, vendored; 11 lines changed)
@@ -13,6 +13,7 @@ import (
 	"time"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/set"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
@@ -69,17 +70,17 @@ func (w *Writer) unlock() {

 // tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata.
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // The caller must have locked the Writer.
-func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
 	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 	if blob, ok := w.blobs[info.Digest]; ok {
-		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+		return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
 	}
-	return false, types.BlobInfo{}, nil
+	return false, private.ReusedBlob{}, nil
 }

 // recordBlob records metadata of a recorded blob, which must contain at least a digest and size.
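The tarfile Writer keeps its blob bookkeeping behind an explicit mutex, and the Locked suffix marks methods that expect the caller to already hold it: TryReusingBlobWithOptions in dest.go takes the lock and then calls tryReusingBlobLocked. A toy sketch of that convention follows, with illustrative names rather than the real c/image implementation.

```go
package main

import (
	"fmt"
	"sync"
)

type writer struct {
	mutex sync.Mutex
	blobs map[string]int64 // digest -> size of blobs already written
}

// tryReusingBlobLocked must only be called with w.mutex held.
func (w *writer) tryReusingBlobLocked(digest string) (bool, int64) {
	size, ok := w.blobs[digest]
	return ok, size
}

// TryReusingBlob is the public entry point; it takes the lock and delegates.
func (w *writer) TryReusingBlob(digest string) (bool, int64) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	return w.tryReusingBlobLocked(digest)
}

func main() {
	w := &writer{blobs: map[string]int64{"sha256:abc": 42}}
	ok, size := w.TryReusingBlob("sha256:abc")
	fmt.Println(ok, size)
}
```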
vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go (generated, vendored; 27 lines changed)
@@ -43,10 +43,17 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+	res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
 		Cache:    blobinfocache.FromBlobInfoCache(cache),
 		IsConfig: isConfig,
 	})
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	return types.BlobInfo{
+		Digest: res.Digest,
+		Size:   res.Size,
+	}, nil
 }

 // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -59,10 +66,26 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+	reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
 		Cache:         blobinfocache.FromBlobInfoCache(cache),
 		CanSubstitute: canSubstitute,
 	})
+	if !reused || err != nil {
+		return reused, types.BlobInfo{}, err
+	}
+	res := types.BlobInfo{
+		Digest:               blob.Digest,
+		Size:                 blob.Size,
+		CompressionOperation: blob.CompressionOperation,
+		CompressionAlgorithm: blob.CompressionAlgorithm,
+	}
+	// This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers.
+	// Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
+	// provide the value in cases where it is likely to be correct.
+	if blob.Digest == info.Digest {
+		res.MediaType = info.MediaType
+	}
+	return true, res, nil
 }

 // PutSignatures writes a set of signatures to the destination.
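Compat is the bridge that lets external callers keep using the public PutBlob/TryReusingBlob signatures while transports implement only the internal variants; note the deliberate MediaType handling, preserved only when the exact requested digest was reused. A hedged sketch of that conversion with toy types (not the real c/image API):

```go
package main

import "fmt"

type publicBlobInfo struct {
	Digest    string
	Size      int64
	MediaType string
}

// internalReusedBlob stands in for the internal result type, which carries
// only transport-owned fields.
type internalReusedBlob struct {
	Digest string
	Size   int64
}

// adaptReuse rebuilds a public blob-info value from the internal result.
func adaptReuse(requested publicBlobInfo, reused bool, blob internalReusedBlob, err error) (bool, publicBlobInfo, error) {
	if !reused || err != nil {
		return reused, publicBlobInfo{}, err
	}
	res := publicBlobInfo{Digest: blob.Digest, Size: blob.Size}
	// Only claim the original MediaType if the exact requested blob was
	// reused; after substitution the old value could be wrong.
	if blob.Digest == requested.Digest {
		res.MediaType = requested.MediaType
	}
	return true, res, nil
}

func main() {
	req := publicBlobInfo{Digest: "sha256:abc", Size: 42, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"}
	ok, res, _ := adaptReuse(req, true, internalReusedBlob{Digest: "sha256:abc", Size: 42}, nil)
	fmt.Println(ok, res.MediaType)
}
```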
@@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
-	return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+	return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
 }

 // ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored; 30 lines changed)
@@ -46,20 +46,34 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
-	return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+	res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
+	return private.UploadedBlob{
+		Digest: res.Digest,
+		Size:   res.Size,
+	}, nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
-	return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+	reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+	if !reused || err != nil {
+		return reused, private.ReusedBlob{}, err
+	}
+	return true, private.ReusedBlob{
+		Digest:               blob.Digest,
+		Size:                 blob.Size,
+		CompressionOperation: blob.CompressionOperation,
+		CompressionAlgorithm: blob.CompressionAlgorithm,
+	}, nil
 }

 // PutSignaturesWithFormat writes a set of signatures to the destination.
vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored; 31 lines changed)
@@ -7,6 +7,7 @@ import (
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/signature"
+	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 )
@@ -46,24 +47,22 @@ type ImageDestinationInternalOnly interface {
 	// inputInfo.MediaType describes the blob format, if known.
 	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 	// to any other readers for download using the supplied digest.
-	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-	PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+	PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)

 	// PutBlobPartial attempts to create a blob using the data that is already present
 	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 	// It is available only if SupportsPutBlobPartial().
 	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 	// should fall back to PutBlobWithOptions.
-	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
+	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)

 	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 	// info.Digest must not be empty.
-	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-	// reflected in the manifest that will be written.
+	// If the blob has been successfully reused, returns (true, info, nil).
 	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)

 	// PutSignaturesWithFormat writes a set of signatures to the destination.
 	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
@@ -79,6 +78,13 @@ type ImageDestination interface {
 	ImageDestinationInternalOnly
 }

+// UploadedBlob is information about a blob written to a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
+type UploadedBlob struct {
+	Digest digest.Digest
+	Size   int64
+}
+
 // PutBlobOptions are used in PutBlobWithOptions.
 type PutBlobOptions struct {
 	Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
@@ -112,6 +118,17 @@ type TryReusingBlobOptions struct {
 	SrcRef reference.Named // A reference to the source image that contains the input blob.
 }

+// ReusedBlob is information about a blob reused in a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting.
+type ReusedBlob struct {
+	Digest digest.Digest // Must be provided
+	Size   int64         // Must be provided
+	// The following compression fields should be set when the reuse substitutes
+	// a differently-compressed blob.
+	CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
+	CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+}
+
 // ImageSourceChunk is a portion of a blob.
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceChunk struct {
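These two structs are the heart of the change: UploadedBlob and ReusedBlob replace types.BlobInfo in the internal destination API, making explicit which fields a transport must fill in. Below is a self-contained sketch of the two reuse cases the ReusedBlob comments describe; all types here are toy mirrors with illustrative names, not the internal c/image definitions.

```go
package main

import "fmt"

// Toy stand-ins for types.LayerCompression and compression.Algorithm.
type layerCompression int

const (
	preserveOriginal layerCompression = iota // no substitution happened
	compress
	decompress
)

type algorithm struct{ name string }

type reusedBlob struct {
	Digest               string
	Size                 int64
	CompressionOperation layerCompression // preserveOriginal if N/A
	CompressionAlgorithm *algorithm       // nil if decompressed or N/A
}

func main() {
	// Exact reuse: only digest and size are required.
	exact := reusedBlob{Digest: "sha256:aaa", Size: 123, CompressionOperation: preserveOriginal}

	// Substituted reuse: a gzip-compressed variant was found instead, so the
	// compression fields tell the copy pipeline what the manifest should say.
	substituted := reusedBlob{
		Digest:               "sha256:bbb",
		Size:                 77,
		CompressionOperation: compress,
		CompressionAlgorithm: &algorithm{name: "gzip"},
	}
	fmt.Println(exact.Digest, substituted.Digest, substituted.CompressionAlgorithm.name)
}
```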
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go (generated, vendored; 12 lines changed)
@@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
 }

@@ -119,18 +119,16 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
 	return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
 }
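ociArchiveImageDestination (and openshiftImageDestination below) are pure pass-through wrappers, so the new return types propagate without any logic changes. A toy illustration of that delegation shape, with made-up names:

```go
package main

import "fmt"

type blobPutter interface {
	PutBlob(data []byte) (string, error)
}

type layoutDest struct{}

func (layoutDest) PutBlob(data []byte) (string, error) {
	return fmt.Sprintf("stored %d bytes", len(data)), nil
}

// archiveDest wraps another destination and forwards every call unchanged,
// so an interface change only requires updating signatures, not logic.
type archiveDest struct {
	unpacked blobPutter
}

func (d archiveDest) PutBlob(data []byte) (string, error) {
	return d.unpacked.PutBlob(data)
}

func main() {
	d := archiveDest{unpacked: layoutDest{}}
	msg, _ := d.PutBlob([]byte("layer"))
	fmt.Println(msg)
}
```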
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go (generated, vendored; 38 lines changed)
@@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	succeeded := false
 	explicitClosed := false
@@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	size, err := io.Copy(blobFile, stream)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	blobDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}

 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -144,52 +144,50 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 	}

 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}

 	// need to explicitly close the file, since a rename won't otherwise not work on Windows
 	blobFile.Close()
 	explicitClosed = true
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	succeeded = true
-	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	if info.Digest == "" {
-		return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
 	finfo, err := os.Stat(blobPath)
 	if err != nil && os.IsNotExist(err) {
-		return false, types.BlobInfo{}, nil
+		return false, private.ReusedBlob{}, nil
 	}
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}

-	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+	return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
 }

 // PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
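The PutBlobWithOptions implementation above streams the blob into a temporary file while a digester (created earlier in the function, outside this hunk) hashes the same bytes, then verifies the size and renames the file into place. Here is a minimal sketch of that one-pass copy-and-digest pattern using the go-digest API; names and error handling are simplified, and the real code also deals with locking, cleanup on failure, and Windows rename behavior.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func putBlob(stream io.Reader, expectedSize int64) (digest.Digest, int64, error) {
	blobFile, err := os.CreateTemp("", "oci-put-blob")
	if err != nil {
		return "", -1, err
	}
	// A real destination would rename the file into the blob store instead.
	defer os.Remove(blobFile.Name())
	defer blobFile.Close()

	// Tee the stream through a digester so the checksum is computed in one pass.
	digester := digest.Canonical.Digester()
	size, err := io.Copy(blobFile, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		return "", -1, err
	}
	if expectedSize != -1 && size != expectedSize {
		return "", -1, fmt.Errorf("size mismatch: expected %d, got %d", expectedSize, size)
	}
	return digester.Digest(), size, nil
}

func main() {
	d, n, err := putBlob(strings.NewReader("hello"), -1)
	fmt.Println(d, n, err)
}
```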
vendor/github.com/containers/image/v5/openshift/openshift_dest.go (generated, vendored; 12 lines changed)
@@ -116,8 +116,8 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 // inputInfo.MediaType describes the blob format, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
 }

@@ -126,18 +126,16 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
 	return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	return d.docker.TryReusingBlobWithOptions(ctx, info, options)
 }
vendor/github.com/containers/image/v5/ostree/ostree_dest.go (generated, vendored; 28 lines changed)
@@ -135,16 +135,16 @@ func (d *ostreeImageDestination) Close() error {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}

 	blobPath := filepath.Join(tmpDir, "content")
 	blobFile, err := os.Create(blobPath)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	defer blobFile.Close()

@@ -152,19 +152,19 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	size, err := io.Copy(blobFile, stream)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	blobDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}

 	hash := blobDigest.Hex()
 	d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
-	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }

 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
@@ -334,11 +334,11 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
 // reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	if d.repo == nil {
 		repo, err := openRepo(d.ref.repo)
 		if err != nil {
-			return false, types.BlobInfo{}, err
+			return false, private.ReusedBlob{}, err
 		}
 		d.repo = repo
 	}
@@ -346,25 +346,25 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,

 	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
 	if err != nil || !found {
-		return found, types.BlobInfo{}, err
+		return found, private.ReusedBlob{}, err
 	}

 	found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
 	if err != nil || !found {
-		return found, types.BlobInfo{}, err
+		return found, private.ReusedBlob{}, err
 	}

 	found, data, err = readMetadata(d.repo, branch, "docker.size")
 	if err != nil || !found {
-		return found, types.BlobInfo{}, err
+		return found, private.ReusedBlob{}, err
 	}

 	size, err := strconv.ParseInt(data, 10, 64)
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}

-	return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+	return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
 }

 // PutManifest writes manifest to the destination.
vendor/github.com/containers/image/v5/pkg/blobcache/dest.go (generated, vendored; 20 lines changed)
@ -134,8 +134,8 @@ func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
|
|||||||
// inputInfo.MediaType describes the blob format, if known.
|
// inputInfo.MediaType describes the blob format, if known.
|
||||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
// to any other readers for download using the supplied digest.
|
// to any other readers for download using the supplied digest.
|
||||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||||
func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
 func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	var tempfile *os.File
 	var err error
 	var n int
@@ -227,18 +227,16 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
 	return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)
 	if err != nil || present {
 		return present, reusedInfo, err
@@ -246,7 +244,7 @@ func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, in

 	blobPath, _, isConfig, err := d.reference.findBlob(info)
 	if err != nil {
-		return false, types.BlobInfo{}, err
+		return false, private.ReusedBlob{}, err
 	}
 	if blobPath != "" {
 		f, err := os.Open(blobPath)
@@ -259,13 +257,13 @@ func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, in
 				LayerIndex: options.LayerIndex,
 			})
 			if err != nil {
-				return false, types.BlobInfo{}, err
+				return false, private.ReusedBlob{}, err
 			}
-			return true, uploadedInfo, nil
+			return true, private.ReusedBlob{Digest: uploadedInfo.Digest, Size: uploadedInfo.Size}, nil
 		}
 	}

-	return false, types.BlobInfo{}, nil
+	return false, private.ReusedBlob{}, nil
 }

 func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
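The pattern in this hunk repeats across all the vendored c/image destinations: PutBlob*/TryReusingBlob* implementations now return the narrow private.UploadedBlob / private.ReusedBlob records instead of a full types.BlobInfo, so callers can no longer rely on fields (such as MediaType) that the destination never actually verified. A minimal stand-alone sketch of that narrowing, using local stand-ins for the private.* types (the names below are illustrative, not the c/image API):

```go
package main

import "fmt"

// Stand-ins for types.BlobInfo and private.ReusedBlob.
type BlobInfo struct {
	Digest    string
	Size      int64
	MediaType string
}

type ReusedBlob struct { // only what the destination can vouch for
	Digest string
	Size   int64
}

// tryReuse mirrors `return true, private.ReusedBlob{Digest: ..., Size: ...}, nil`:
// the rich input is narrowed to the two fields the destination verified.
func tryReuse(info BlobInfo) (bool, ReusedBlob, error) {
	return true, ReusedBlob{Digest: info.Digest, Size: info.Size}, nil
}

func main() {
	ok, reused, err := tryReuse(BlobInfo{Digest: "sha256:aaa", Size: 42, MediaType: "application/gzip"})
	fmt.Println(ok, reused, err) // MediaType is deliberately dropped
}
```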
vendor/github.com/containers/image/v5/storage/storage_dest.go (255 changed lines; generated, vendored)
@@ -77,13 +77,19 @@ type storageImageDestination struct {
 	indexToStorageID map[int]*string
 	// All accesses to below data are protected by `lock` which is made
 	// *explicit* in the code.
 	blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
 	fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
 	filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
 	currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
-	indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
+	indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
 	blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
 	diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
 }
+
+// addedLayerInfo records data about a layer to use in this image.
+type addedLayerInfo struct {
+	digest     digest.Digest
+	emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}

 // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
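A side effect of switching the queue from map[int]*manifest.LayerInfo to map[int]addedLayerInfo is that absence no longer needs a nil-able pointer: a value type works with Go's comma-ok lookup. A small illustration with toy types (not the vendored code):

```go
package main

import "fmt"

type layerInfo struct{ digest string }

func main() {
	// Old shape: absence signalled by a nil pointer.
	byPointer := map[int]*layerInfo{0: {digest: "sha256:aaa"}}
	if info := byPointer[1]; info == nil {
		fmt.Println("pointer map: index 1 not queued")
	}

	// New shape: absence signalled by the comma-ok boolean.
	byValue := map[int]layerInfo{0: {digest: "sha256:aaa"}}
	if _, ok := byValue[1]; !ok {
		fmt.Println("value map: index 1 not queued")
	}
}
```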
@@ -111,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
 			HasThreadSafePutBlob: true,
 		}),

 		imageRef: imageRef,
 		directory: directory,
 		signatureses: make(map[digest.Digest][]byte),
 		blobDiffIDs: make(map[digest.Digest]digest.Digest),
 		blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
 		fileSizes: make(map[digest.Digest]int64),
 		filenames: make(map[digest.Digest]string),
 		SignatureSizes: []int{},
 		SignaturesSizes: make(map[digest.Digest][]int),
 		indexToStorageID: make(map[int]*string),
-		indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+		indexToAddedLayerInfo: make(map[int]addedLayerInfo),
 		diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
 	}
 	dest.Compat = impl.AddCompat(dest)
 	return dest, nil
@@ -158,7 +164,7 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
 	info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
 	if err != nil {
 		return info, err
@@ -168,21 +174,20 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return info, nil
 	}

-	return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+	return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+		digest: info.Digest,
+		emptyLayer: options.EmptyLayer,
+	})
 }

 // putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
 // The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
+func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
 	// Stores a layer or data blob in our temporary directory, checking that any information
 	// in the blobinfo matches the incoming data.
-	errorBlobInfo := types.BlobInfo{
-		Digest: "",
-		Size: -1,
-	}
 	if blobinfo.Digest != "" {
 		if err := blobinfo.Digest.Validate(); err != nil {
-			return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+			return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
 		}
 	}

@@ -190,7 +195,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	filename := s.computeNextBlobCacheFile()
 	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
 	if err != nil {
-		return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
+		return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
 	}
 	defer file.Close()
 	counter := ioutils.NewWriteCounter(file)
@@ -198,7 +203,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
 	decompressed, err := archive.DecompressStream(stream)
 	if err != nil {
-		return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
+		return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
 	}

 	diffID := digest.Canonical.Digester()
@@ -207,7 +212,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	_, err = io.Copy(diffID.Hash(), decompressed)
 	decompressed.Close()
 	if err != nil {
-		return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
+		return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
 	}

 	// Determine blob properties, and fail if information that we were given about the blob
@@ -217,7 +222,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	if blobSize < 0 {
 		blobSize = counter.Count
 	} else if blobinfo.Size != counter.Count {
-		return errorBlobInfo, ErrBlobSizeMismatch
+		return private.UploadedBlob{}, ErrBlobSizeMismatch
 	}

 	// Record information about the blob.
@@ -229,10 +234,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	// This is safe because we have just computed diffID, and blobDigest was either computed
 	// by us, or validated by the caller (usually copy.digestingReader).
 	options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
-	return types.BlobInfo{
+	return private.UploadedBlob{
 		Digest: blobDigest,
 		Size: blobSize,
-		MediaType: blobinfo.MediaType,
 	}, nil
 }

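putBlobToPendingFile does all of its bookkeeping in a single pass: the compressed stream is written to a temporary file while a byte counter, the compressed digest, and the digest of the decompressed contents (the DiffID) are computed on the fly. A self-contained sketch of that plumbing using only the standard library; gzip is hard-coded here whereas the vendored code auto-detects compression, and all names are invented:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// countingWriter counts the compressed bytes that reach the file.
type countingWriter struct {
	n int64
	w io.Writer
}

func (c *countingWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	c.n += int64(n)
	return n, err
}

func putToPendingFile(stream io.Reader, path string) (size int64, blobSum, diffID string, err error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, "", "", err
	}
	defer f.Close()

	compressed := sha256.New()
	cw := &countingWriter{w: io.MultiWriter(f, compressed)}
	tee := io.TeeReader(stream, cw) // every byte read is also written out

	gz, err := gzip.NewReader(tee)
	if err != nil {
		return 0, "", "", err
	}
	uncompressed := sha256.New()
	if _, err := io.Copy(uncompressed, gz); err != nil { // drives the whole pipeline
		return 0, "", "", err
	}
	return cw.n,
		fmt.Sprintf("sha256:%x", compressed.Sum(nil)),
		fmt.Sprintf("sha256:%x", uncompressed.Sum(nil)),
		nil
}

func main() {
	var blob bytes.Buffer
	zw := gzip.NewWriter(&blob)
	_, _ = zw.Write([]byte("hello layer"))
	_ = zw.Close()

	size, blobSum, diffID, err := putToPendingFile(&blob, os.TempDir()+"/pending-blob-example")
	if err != nil {
		panic(err)
	}
	fmt.Println(size, blobSum, diffID)
}
```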
@@ -265,7 +269,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
 // It is available only if SupportsPutBlobPartial().
 // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
 // should fall back to PutBlobWithOptions.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
 	fetcher := zstdFetcher{
 		chunkAccessor: chunkAccessor,
 		ctx: ctx,
@@ -274,12 +278,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces

 	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
 	if err != nil {
-		return srcInfo, err
+		return private.UploadedBlob{}, err
 	}

 	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
 	if err != nil {
-		return srcInfo, err
+		return private.UploadedBlob{}, err
 	}

 	blobDigest := srcInfo.Digest
@@ -291,124 +295,126 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	s.diffOutputs[blobDigest] = out
 	s.lock.Unlock()

-	return srcInfo, nil
+	return private.UploadedBlob{
+		Digest: blobDigest,
+		Size: srcInfo.Size,
+	}, nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-	reused, info, err := s.tryReusingBlobAsPending(blobinfo, &options)
+	reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
 	if err != nil || !reused || options.LayerIndex == nil {
 		return reused, info, err
 	}

-	return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+	return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+		digest: info.Digest,
+		emptyLayer: options.EmptyLayer,
+	})
 }

-// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
 // The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) tryReusingBlobAsPending(blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
 	// lock the entire method as it executes fairly quickly
 	s.lock.Lock()
 	defer s.lock.Unlock()

 	if options.SrcRef != nil {
 		// Check if we have the layer in the underlying additional layer store.
-		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
+		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
 		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-			return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
+			return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
 		} else if err == nil {
 			// Record the uncompressed value so that we can use it to calculate layer IDs.
-			s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
+			s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
-			s.blobAdditionalLayer[blobinfo.Digest] = aLayer
+			s.blobAdditionalLayer[digest] = aLayer
-			return true, types.BlobInfo{
+			return true, private.ReusedBlob{
-				Digest: blobinfo.Digest,
+				Digest: digest,
 				Size: aLayer.CompressedSize(),
-				MediaType: blobinfo.MediaType,
 			}, nil
 		}
 	}

-	if blobinfo.Digest == "" {
+	if digest == "" {
-		return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
+		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
 	}
-	if err := blobinfo.Digest.Validate(); err != nil {
+	if err := digest.Validate(); err != nil {
-		return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
 	}

 	// Check if we've already cached it in a file.
-	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+	if size, ok := s.fileSizes[digest]; ok {
-		return true, types.BlobInfo{
+		return true, private.ReusedBlob{
-			Digest: blobinfo.Digest,
+			Digest: digest,
 			Size: size,
-			MediaType: blobinfo.MediaType,
 		}, nil
 	}

 	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
-	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
 	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-		return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
+		return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
 	}
 	if len(layers) > 0 {
 		// Save this for completeness.
-		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		s.blobDiffIDs[digest] = layers[0].UncompressedDigest
-		return true, types.BlobInfo{
+		return true, private.ReusedBlob{
-			Digest: blobinfo.Digest,
+			Digest: digest,
 			Size: layers[0].UncompressedSize,
-			MediaType: blobinfo.MediaType,
 		}, nil
 	}

 	// Check if we have a was-compressed layer in storage that's based on that blob.
-	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
 	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-		return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
+		return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
 	}
 	if len(layers) > 0 {
 		// Record the uncompressed value so that we can use it to calculate layer IDs.
-		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		s.blobDiffIDs[digest] = layers[0].UncompressedDigest
-		return true, types.BlobInfo{
+		return true, private.ReusedBlob{
-			Digest: blobinfo.Digest,
+			Digest: digest,
 			Size: layers[0].CompressedSize,
-			MediaType: blobinfo.MediaType,
 		}, nil
 	}

 	// Does the blob correspond to a known DiffID which we already have available?
 	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
 	// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
-	if options.CanSubstitute || blobinfo.Size != -1 {
+	if options.CanSubstitute || size != -1 {
-		if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+		if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
 			layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
 			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
-				return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
 			}
 			if len(layers) > 0 {
-				if blobinfo.Size != -1 {
+				if size != -1 {
-					s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+					s.blobDiffIDs[digest] = layers[0].UncompressedDigest
-					return true, blobinfo, nil
+					return true, private.ReusedBlob{
+						Digest: digest,
+						Size: size,
+					}, nil
 				}
 				if !options.CanSubstitute {
-					return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
+					return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
 				}
 				s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
-				return true, types.BlobInfo{
+				return true, private.ReusedBlob{
 					Digest: uncompressedDigest,
 					Size: layers[0].UncompressedSize,
-					MediaType: blobinfo.MediaType,
 				}, nil
 			}
 		}
 	}

 	// Nope, we don't have it.
-	return false, types.BlobInfo{}, nil
+	return false, private.ReusedBlob{}, nil
 }

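tryReusingBlobAsPending is a first-match lookup chain: the additional layer store, an already-cached file, uncompressed layers, compressed layers, and finally DiffID substitution via the blob info cache; the first source that recognizes the digest wins. The shape of that pattern, reduced to a sketch (the sources and types here are invented stand-ins, not the c/image API):

```go
package main

import "fmt"

type reused struct {
	digest string
	size   int64
}

// Each candidate source either reports a hit or defers to the next one.
type source func(digest string) (reused, bool)

func tryReuse(digest string, chain []source) (reused, bool) {
	for _, lookup := range chain {
		if r, ok := lookup(digest); ok {
			return r, true // first hit wins; later sources are never consulted
		}
	}
	return reused{}, false // nope, we don't have it
}

func main() {
	cachedFiles := map[string]int64{"sha256:aaa": 123}

	chain := []source{
		func(d string) (reused, bool) { return reused{}, false }, // e.g. additional layer store
		func(d string) (reused, bool) { // e.g. already-cached file
			size, ok := cachedFiles[d]
			return reused{digest: d, size: size}, ok
		},
	}
	fmt.Println(tryReuse("sha256:aaa", chain))
}
```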
 // computeID computes a recommended image ID based on information we have so far. If
@@ -470,10 +476,10 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
 	return nil, errors.New("blob not found")
 }

-// queueOrCommit queues in the specified blob to be committed to the storage.
+// queueOrCommit queues the specified layer to be committed to the storage.
 // If no other goroutine is already committing layers, the layer and all
 // subsequent layers (if already queued) will be committed to the storage.
-func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
+func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
 	// NOTE: whenever the code below is touched, make sure that all code
 	// paths unlock the lock and to unlock it exactly once.
 	//
@@ -493,10 +499,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
 	// caller is the "worker" routine committing layers. All other routines
 	// can continue pulling and queuing in layers.
 	s.lock.Lock()
-	s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
-		BlobInfo: blob,
-		EmptyLayer: emptyLayer,
-	}
+	s.indexToAddedLayerInfo[index] = info

 	// We're still waiting for at least one previous/parent layer to be
 	// committed, so there's nothing to do.
@@ -505,10 +508,14 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
 		return nil
 	}

-	for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
+	for {
+		info, ok := s.indexToAddedLayerInfo[index]
+		if !ok {
+			break
+		}
 		s.lock.Unlock()
 		// Note: commitLayer locks on-demand.
-		if err := s.commitLayer(ctx, *info, index); err != nil {
+		if err := s.commitLayer(index, info, -1); err != nil {
 			return err
 		}
 		s.lock.Lock()
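The rewritten loop illustrates the lock discipline the NOTE above insists on: the queue map is consulted under s.lock, the lock is dropped for the slow commit, and re-acquired before the next lookup, so other goroutines can keep queuing layers while one worker drains the map. A reduced sketch of that dance, with invented names:

```go
package main

import (
	"fmt"
	"sync"
)

type committer struct {
	lock   sync.Mutex
	queued map[int]string // index -> layer digest, like indexToAddedLayerInfo
}

func (c *committer) drain(index int) {
	c.lock.Lock()
	for {
		digest, ok := c.queued[index]
		if !ok {
			break // next layer not queued yet; a later caller will resume here
		}
		c.lock.Unlock() // don't hold the lock across the slow commit
		fmt.Printf("committing layer %d (%s)\n", index, digest)
		c.lock.Lock()
		index++
	}
	c.lock.Unlock() // every path unlocks exactly once
}

func main() {
	c := &committer{queued: map[int]string{0: "sha256:aaa", 1: "sha256:bbb"}}
	c.drain(0)
}
```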
@@ -522,13 +529,15 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
 		return nil
 	}

-// commitLayer commits the specified blob with the given index to the storage.
+// commitLayer commits the specified layer with the given index to the storage.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+//
 // Note that the previous layer is expected to already be committed.
 //
 // Caution: this function must be called without holding `s.lock`. Callers
 // must guarantee that, at any given time, at most one goroutine may execute
 // `commitLayer()`.
-func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
 	// Already committed? Return early.
 	if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
 		return nil
@@ -543,7 +552,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	}

 	// Carry over the previous ID for empty non-base layers.
-	if blob.EmptyLayer {
+	if info.emptyLayer {
 		s.indexToStorageID[index] = &lastLayer
 		return nil
 	}
@@ -551,7 +560,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	// Check if there's already a layer with the ID that we'd give to the result of applying
 	// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
 	s.lock.Lock()
-	diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+	diffID, haveDiffID := s.blobDiffIDs[info.digest]
 	s.lock.Unlock()
 	if !haveDiffID {
 		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
@@ -560,18 +569,21 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		// that relies on using a blob digest that has never been seen by the store had better call
 		// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
 		// so far we are going to accommodate that (if we should be doing that at all).
-		logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+		logrus.Debugf("looking for diffID for blob %+v", info.digest)
-		// NOTE: use `TryReusingBlob` to prevent recursion.
+		// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
-		has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+		has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
+			Cache: none.NoCache,
+			CanSubstitute: false,
+		})
 		if err != nil {
-			return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
+			return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
 		}
 		if !has {
-			return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+			return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
 		}
-		diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+		diffID, haveDiffID = s.blobDiffIDs[info.digest]
 		if !haveDiffID {
-			return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+			return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
 		}
 	}
 	id := diffID.Hex()
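The replaced comment spells out the recursion hazard: the public entry point both looks up a blob and queues a commit, so internal code that only needs the lookup must call the helper directly. A schematic sketch of that split; all names here are illustrative, not the c/image API:

```go
package main

import "fmt"

type dest struct{ known map[string]bool }

func (d *dest) tryReuseAsPending(digest string) bool { // internal lookup only
	return d.known[digest]
}

func (d *dest) TryReusingBlob(digest string) bool { // public: lookup + commit queue
	ok := d.tryReuseAsPending(digest)
	if ok {
		d.queueOrCommit(digest)
	}
	return ok
}

func (d *dest) queueOrCommit(digest string) {
	// commitLayer would run here; calling d.TryReusingBlob from inside
	// commitLayer would re-enter queueOrCommit - hence the direct helper call.
	fmt.Println("commit", digest)
}

func main() {
	d := &dest{known: map[string]bool{"sha256:aaa": true}}
	fmt.Println(d.TryReusingBlob("sha256:aaa"))
}
```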
@@ -586,7 +598,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	}

 	s.lock.Lock()
-	diffOutput, ok := s.diffOutputs[blob.Digest]
+	diffOutput, ok := s.diffOutputs[info.digest]
 	s.lock.Unlock()
 	if ok {
 		layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
@@ -595,7 +607,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		}

 		// FIXME: what to do with the uncompressed digest?
-		diffOutput.UncompressedDigest = blob.Digest
+		diffOutput.UncompressedDigest = info.digest

 		if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
 			_ = s.imageRef.transport.store.Delete(layer.ID)
@@ -607,7 +619,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	}

 	s.lock.Lock()
-	al, ok := s.blobAdditionalLayer[blob.Digest]
+	al, ok := s.blobAdditionalLayer[info.digest]
 	s.lock.Unlock()
 	if ok {
 		layer, err := al.PutAs(id, lastLayer, nil)
@@ -622,7 +634,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	// Check if we previously cached a file with that blob's contents. If we didn't,
 	// then we need to read the desired contents from a layer.
 	s.lock.Lock()
-	filename, ok := s.filenames[blob.Digest]
+	filename, ok := s.filenames[info.digest]
 	s.lock.Unlock()
 	if !ok {
 		// Try to find the layer with contents matching that blobsum.
@@ -631,13 +643,13 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		if err2 == nil && len(layers) > 0 {
 			layer = layers[0].ID
 		} else {
-			layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+			layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
 			if err2 == nil && len(layers) > 0 {
 				layer = layers[0].ID
 			}
 		}
 		if layer == "" {
-			return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
+			return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
 		}
 		// Read the layer's contents.
 		noCompression := archive.Uncompressed
@@ -646,7 +658,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		}
 		diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
 		if err2 != nil {
-			return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
+			return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
 		}
 		// Copy the layer diff to a file. Diff() takes a lock that it holds
 		// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -670,7 +682,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		// Make sure that we can find this file later, should we need the layer's
 		// contents again.
 		s.lock.Lock()
-		s.filenames[blob.Digest] = filename
+		s.filenames[info.digest] = filename
 		s.lock.Unlock()
 	}
 	// Read the cached blob and use it as a diff.
@@ -682,11 +694,11 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	// Build the new layer using the diff, regardless of where it came from.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
-		OriginalDigest: blob.Digest,
+		OriginalDigest: info.digest,
 		UncompressedDigest: diffID,
 	}, file)
 	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-		return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
+		return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
 	}

 	s.indexToStorageID[index] = &layer.ID
@@ -737,7 +749,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t

 	// Extract, commit, or find the layers.
 	for i, blob := range layerBlobs {
-		if err := s.commitLayer(ctx, blob, i); err != nil {
+		if err := s.commitLayer(i, addedLayerInfo{
+			digest: blob.Digest,
+			emptyLayer: blob.EmptyLayer,
+		}, blob.Size); err != nil {
 			return err
 		}
 	}
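Commit() now funnels every manifest layer through the same addedLayerInfo path, and threads the manifest's declared blob size into commitLayer so that a blob the destination never saw via PutBlob can still be resolved. A toy version of that conversion loop, with stand-in types rather than the vendored API:

```go
package main

import "fmt"

type layerBlob struct { // like manifest.LayerInfo
	Digest     string
	Size       int64
	EmptyLayer bool
}

type addedLayer struct {
	digest     string
	emptyLayer bool
}

func commitLayer(index int, info addedLayer, size int64) error {
	fmt.Printf("layer %d: digest=%q size=%d empty=%v\n", index, info.digest, size, info.emptyLayer)
	return nil
}

func main() {
	layerBlobs := []layerBlob{
		{Digest: "sha256:aaa", Size: 100},
		{Digest: "", Size: -1, EmptyLayer: true}, // throwaway layer carries no blob
	}
	for i, blob := range layerBlobs {
		if err := commitLayer(i, addedLayer{digest: blob.Digest, emptyLayer: blob.EmptyLayer}, blob.Size); err != nil {
			panic(err)
		}
	}
}
```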
vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go (4 changed lines; generated, vendored)
@@ -32,3 +32,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
 func (q *Control) GetQuota(targetPath string, quota *Quota) error {
 	return errors.New("filesystem does not support, or has not enabled quotas")
 }
+
+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {}
vendor/github.com/go-openapi/strfmt/.golangci.yml (59 changed lines; generated, vendored)
@@ -14,31 +14,40 @@ linters-settings:
     min-occurrences: 4

 linters:
-  enable-all: true
-  disable:
-    - maligned
-    - lll
-    - gochecknoinits
-    - gochecknoglobals
-    - godox
-    - gocognit
-    - whitespace
-    - wsl
-    - funlen
-    - wrapcheck
-    - testpackage
-    - nlreturn
-    - gofumpt
-    - goerr113
-    - gci
-    - gomnd
-    - godot
-    - exhaustivestruct
-    - paralleltest
-    - varnamelen
-    - ireturn
-    - exhaustruct
-    #- thelper
+  enable:
+    - revive
+    - goimports
+    - gosec
+    - unparam
+    - unconvert
+    - predeclared
+    - prealloc
+    - misspell
+
+  # disable:
+  # - maligned
+  # - lll
+  # - gochecknoinits
+  # - gochecknoglobals
+  # - godox
+  # - gocognit
+  # - whitespace
+  # - wsl
+  # - funlen
+  # - wrapcheck
+  # - testpackage
+  # - nlreturn
+  # - gofumpt
+  # - goerr113
+  # - gci
+  # - gomnd
+  # - godot
+  # - exhaustivestruct
+  # - paralleltest
+  # - varnamelen
+  # - ireturn
+  # - exhaustruct
+  # #- thelper

 issues:
   exclude-rules:
vendor/github.com/go-openapi/strfmt/date.go (6 changed lines; generated, vendored)
@@ -57,7 +57,7 @@ func (d *Date) UnmarshalText(text []byte) error {
 	if len(text) == 0 {
 		return nil
 	}
-	dd, err := time.Parse(RFC3339FullDate, string(text))
+	dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
 	if err != nil {
 		return err
 	}
@@ -107,7 +107,7 @@ func (d *Date) UnmarshalJSON(data []byte) error {
 	if err := json.Unmarshal(data, &strdate); err != nil {
 		return err
 	}
-	tt, err := time.Parse(RFC3339FullDate, strdate)
+	tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation)
 	if err != nil {
 		return err
 	}
@@ -126,7 +126,7 @@ func (d *Date) UnmarshalBSON(data []byte) error {
 	}

 	if data, ok := m["data"].(string); ok {
-		rd, err := time.Parse(RFC3339FullDate, data)
+		rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
 		if err != nil {
 			return err
 		}
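The only functional change in date.go is the swap from time.Parse to time.ParseInLocation: a zone-less timestamp is now interpreted in an explicit location (strfmt's new DefaultTimeLocation, which defaults to time.UTC) rather than whatever Parse implies. The difference in a few lines (assumes the host has tzdata for America/New_York):

```go
package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02" // the shape of strfmt's RFC3339FullDate layout

func main() {
	ny, err := time.LoadLocation("America/New_York")
	if err != nil {
		panic(err)
	}

	utc, _ := time.Parse(layout, "2023-03-29")                 // zone-less input: implied UTC
	local, _ := time.ParseInLocation(layout, "2023-03-29", ny) // zone made explicit

	fmt.Println(utc)              // 2023-03-29 00:00:00 +0000 UTC
	fmt.Println(local)            // 2023-03-29 00:00:00 -0400 EDT
	fmt.Println(utc.Equal(local)) // false: same wall clock, different instants
}
```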
vendor/github.com/go-openapi/strfmt/format.go (2 changed lines; generated, vendored)
@@ -109,7 +109,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
 			if to == tpe {
 				switch v.Name {
 				case "date":
-					d, err := time.Parse(RFC3339FullDate, data)
+					d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
 					if err != nil {
 						return nil, err
 					}
vendor/github.com/go-openapi/strfmt/time.go (21 changed lines; generated, vendored)
@@ -29,6 +29,12 @@ import (
 	"go.mongodb.org/mongo-driver/bson/bsontype"
 )

+var (
+	// UnixZero sets the zero unix timestamp we want to compare against.
+	// Unix 0 for an EST timezone is not equivalent to a UTC timezone.
+	UnixZero = time.Unix(0, 0).UTC()
+)
+
 func init() {
 	dt := DateTime{}
 	Default.Add("datetime", &dt, IsDateTime)
@@ -86,6 +92,9 @@
 	// NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC).
 	// By default, the time value is not changed.
 	NormalizeTimeForMarshal = func(t time.Time) time.Time { return t }
+
+	// DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants).
+	DefaultTimeLocation = time.UTC
 )

 // ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
@@ -95,7 +104,7 @@ func ParseDateTime(data string) (DateTime, error) {
 	}
 	var lastError error
 	for _, layout := range DateTimeFormats {
-		dd, err := time.Parse(layout, data)
+		dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation)
 		if err != nil {
 			lastError = err
 			continue
@@ -123,6 +132,16 @@ func (t DateTime) String() string {
 	return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)
 }

+// IsZero returns whether the date time is a zero value
+func (t DateTime) IsZero() bool {
+	return time.Time(t).IsZero()
+}
+
+// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
+func (t DateTime) IsUnixZero() bool {
+	return time.Time(t) == UnixZero
+}
+
 // MarshalText implements the text marshaller interface
 func (t DateTime) MarshalText() ([]byte, error) {
 	return []byte(t.String()), nil
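The new UnixZero/IsUnixZero helpers exist because Go's zero time.Time (January 1, year 1) is not the Unix epoch, so "is this a zero value" and "is this Unix time 0" are different questions. A quick demonstration:

```go
package main

import (
	"fmt"
	"time"
)

var unixZero = time.Unix(0, 0).UTC() // like strfmt's UnixZero

func main() {
	var zero time.Time             // 0001-01-01 00:00:00 UTC
	epoch := time.Unix(0, 0).UTC() // 1970-01-01 00:00:00 UTC

	fmt.Println(zero.IsZero(), zero == unixZero)   // true false
	fmt.Println(epoch.IsZero(), epoch == unixZero) // false true
}
```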
vendor/github.com/go-openapi/strfmt/ulid.go (11 changed lines; generated, vendored)
@@ -15,9 +15,12 @@ import (

 // ULID represents a ulid string format
 // ref:
-// https://github.com/ulid/spec
+//
+//	https://github.com/ulid/spec
+//
 // impl:
-// https://github.com/oklog/ulid
+//
+//	https://github.com/oklog/ulid
 //
 // swagger:strfmt ulid
 type ULID struct {
@@ -89,7 +92,9 @@ func NewULIDZero() ULID {
 }

 // NewULID generates a new unique ULID value and an error if any
-func NewULID() (u ULID, err error) {
+func NewULID() (ULID, error) {
+	var u ULID
+
 	obj := ulidEntropyPool.Get()
 	entropy, ok := obj.(io.Reader)
 	if !ok {
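The context lines above show NewULID drawing its entropy reader from a sync.Pool (ulidEntropyPool), so concurrent callers do not each allocate a buffered source of randomness. A standard-library-only sketch of that pooling pattern; the pool contents and the 16-byte ID below are illustrative, not the oklog/ulid implementation:

```go
package main

import (
	"bufio"
	"crypto/rand"
	"fmt"
	"io"
	"sync"
)

var entropyPool = sync.Pool{
	New: func() any { return bufio.NewReader(rand.Reader) },
}

func newID() ([16]byte, error) {
	var id [16]byte
	obj := entropyPool.Get()
	entropy, ok := obj.(io.Reader) // same type assertion as in NewULID
	if !ok {
		return id, fmt.Errorf("unexpected pool entry %T", obj)
	}
	defer entropyPool.Put(obj)
	_, err := io.ReadFull(entropy, id[:])
	return id, err
}

func main() {
	id, err := newID()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", id)
}
```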
vendor/github.com/imdario/mergo/CONTRIBUTING.md (112 changed lines; generated, vendored; new file)
@ -0,0 +1,112 @@
|
|||||||
|
<!-- omit in toc -->
|
||||||
|
# Contributing to mergo
|
||||||
|
|
||||||
|
First off, thanks for taking the time to contribute! ❤️
|
||||||
|
|
||||||
|
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
|
||||||
|
|
||||||
|
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
|
||||||
|
> - Star the project
|
||||||
|
> - Tweet about it
|
||||||
|
> - Refer this project in your project's readme
|
||||||
|
> - Mention the project at local meetups and tell your friends/colleagues
|
||||||
|
|
||||||
|
<!-- omit in toc -->
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
- [Code of Conduct](#code-of-conduct)
|
||||||
|
- [I Have a Question](#i-have-a-question)
|
||||||
|
- [I Want To Contribute](#i-want-to-contribute)
|
||||||
|
- [Reporting Bugs](#reporting-bugs)
|
||||||
|
- [Suggesting Enhancements](#suggesting-enhancements)
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
This project and everyone participating in it is governed by the
|
||||||
|
[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md).
|
||||||
|
By participating, you are expected to uphold this code. Please report unacceptable behavior
|
||||||
|
to <>.
|
||||||
|
|
||||||
|
|
||||||
|
## I Have a Question
|
||||||
|
|
||||||
|
> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
|
||||||
|
|
||||||
|
Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
|
||||||
|
|
||||||
|
If you then still feel the need to ask a question and need clarification, we recommend the following:
|
||||||
|
|
||||||
|
- Open an [Issue](https://github.com/imdario/mergo/issues/new).
|
||||||
|
- Provide as much context as you can about what you're running into.
|
||||||
|
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
|
||||||
|
|
||||||
|
We will then take care of the issue as soon as possible.
|
||||||
|
|
||||||
|
## I Want To Contribute
|
||||||
|
|
||||||
|
> ### Legal Notice <!-- omit in toc -->
|
||||||
|
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
|
||||||
|
|
||||||
|
### Reporting Bugs
|
||||||
|
|
||||||
|
<!-- omit in toc -->
|
||||||
|
#### Before Submitting a Bug Report
|
||||||
|
|
||||||
|
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.

- Make sure that you are using the latest version.
- Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions (make sure that you have read the [documentation]()). If you are looking for support, you might want to check [this section](#i-have-a-question).
- Check the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug) to see whether a report for your bug or error already exists; other users may have experienced (and potentially already solved) the same issue.
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
- Collect information about the bug:
  - Stack trace (traceback)
  - OS, platform and version (Windows, Linux, macOS, x86, ARM)
  - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
  - Possibly your input and the output
  - Can you reliably reproduce the issue? And can you also reproduce it with older versions?

<!-- omit in toc -->
#### How Do I Submit a Good Bug Report?

> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to .
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->

We use GitHub issues to track bugs and errors. If you run into an issue with the project:

- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
- Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
- Provide the information you collected in the previous section.

Once it's filed:

- The project team will label the issue accordingly.
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.

### Suggesting Enhancements

This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.

<!-- omit in toc -->
#### Before Submitting an Enhancement

- Make sure that you are using the latest version.
- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.

<!-- omit in toc -->
#### How Do I Submit a Good Enhancement Suggestion?

Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).

- Use a **clear and descriptive title** for the issue to identify the suggestion.
- Provide a **step-by-step description of the suggested enhancement** in as much detail as possible.
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.

<!-- omit in toc -->
## Attribution

This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
5
vendor/github.com/imdario/mergo/README.md
generated
vendored
@@ -1,6 +1,5 @@
 # Mergo
 
-
 [![GoDoc][3]][4]
 [![GitHub release][5]][6]
 [![GoCard][7]][8]
@@ -9,6 +8,7 @@
 [![Sourcegraph][11]][12]
 [![FOSSA Status][13]][14]
 [![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
 
 [1]: https://travis-ci.org/imdario/mergo.png
 [2]: https://travis-ci.org/imdario/mergo
@@ -26,6 +26,8 @@
 [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
 [15]: https://img.shields.io/github/sponsors/imdario
 [16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
 
 A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
 
@@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
 
 ### Mergo in the wild
 
-- [cli/cli](https://github.com/cli/cli)
 - [moby/moby](https://github.com/moby/moby)
 - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
 - [vmware/dispatch](https://github.com/vmware/dispatch)
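The README's one-line description above ("a helper to merge structs and maps in Golang, useful for configuration default values") is easiest to see in action. A minimal sketch, assuming mergo v0.3.x; the `config` struct and its field values are made up for illustration:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// config is a hypothetical settings struct, used only for illustration.
type config struct {
	Host    string
	Port    int
	Verbose bool
}

func main() {
	// Values parsed from flags or a file; Port was never set.
	cfg := config{Host: "example.com"}

	// Library defaults to fall back to.
	defaults := config{Host: "localhost", Port: 8080}

	// Merge fills only the empty (zero-valued) fields of cfg,
	// replacing a chain of "if cfg.X == zero { cfg.X = ... }" statements.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", cfg) // {Host:example.com Port:8080 Verbose:false}
}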
14
vendor/github.com/imdario/mergo/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 0.3.x   | :white_check_mark: |
+| < 0.3   | :x:                |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
6
vendor/github.com/imdario/mergo/map.go
generated
vendored
@@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 			}
 		}
 		// Remember, remember...
-		visited[h] = &visit{addr, typ, seen}
+		visited[h] = &visit{typ, seen, addr}
 	}
 	zeroValue := reflect.Value{}
 	switch dst.Kind() {
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 			}
 			fieldName := field.Name
 			fieldName = changeInitialCase(fieldName, unicode.ToLower)
-			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
 				dstMap[fieldName] = src.Field(i).Interface()
 			}
 		}
@@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
 
 func _map(dst, src interface{}, opts ...func(*Config)) error {
 	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
-		return ErrNonPointerAgument
+		return ErrNonPointerArgument
 	}
 	var (
 		vDst, vSrc reflect.Value
59
vendor/github.com/imdario/mergo/merge.go
generated
vendored
@@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool {
 }
 
 type Config struct {
+	Transformers         Transformers
 	Overwrite            bool
+	ShouldNotDereference bool
 	AppendSlice          bool
 	TypeCheck            bool
-	Transformers         Transformers
 	overwriteWithEmptyValue      bool
 	overwriteSliceWithEmptyValue bool
 	sliceDeepCopy                bool
@@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 		}
 		// Remember, remember...
-		visited[h] = &visit{addr, typ, seen}
+		visited[h] = &visit{typ, seen, addr}
 	}
 
 	if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
@@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			}
 		}
 	} else {
-		if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
+		if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
 			dst.Set(src)
 		}
 	}
@@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 		}
 
 		if src.Kind() != reflect.Map {
-			if overwrite {
+			if overwrite && dst.CanSet() {
 				dst.Set(src)
 			}
 			return
@@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 				dstSlice = reflect.ValueOf(dstElement.Interface())
 			}
 
-			if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+			if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
 				if typeCheck && srcSlice.Type() != dstSlice.Type() {
 					return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
 				}
@@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 					dst.SetMapIndex(key, dstSlice)
 				}
 			}
-			if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
-				continue
+			if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+				if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+					continue
+				}
+				if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+					continue
+				}
 			}
 
-			if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+			if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
				if dst.IsNil() {
 					dst.Set(reflect.MakeMap(dst.Type()))
 				}
 				dst.SetMapIndex(key, srcElement)
 			}
 		}
 
+		// Ensure that all keys in dst are deleted if they are not in src.
+		if overwriteWithEmptySrc {
+			for _, key := range dst.MapKeys() {
+				srcElement := src.MapIndex(key)
+				if !srcElement.IsValid() {
+					dst.SetMapIndex(key, reflect.Value{})
+				}
+			}
+		}
 	case reflect.Slice:
 		if !dst.CanSet() {
 			break
 		}
-		if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+		if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
 			dst.Set(src)
 		} else if config.AppendSlice {
 			if src.Type() != dst.Type() {
@@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 
 		if src.Kind() != reflect.Interface {
 			if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
-				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
 					dst.Set(src)
 				}
 			} else if src.Kind() == reflect.Ptr {
-				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
-					return
+				if !config.ShouldNotDereference {
+					if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+						return
+					}
+				} else {
+					if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+						dst.Set(src)
+					}
 				}
 			} else if dst.Elem().Type() == src.Type() {
 				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
@@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 		}
 
 		if dst.IsNil() || overwrite {
-			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
 				dst.Set(src)
 			}
 			break
@@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			break
 		}
 	default:
-		mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+		mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
 		if mustSet {
 			if dst.CanSet() {
 				dst.Set(src)
@@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) {
 	config.overwriteSliceWithEmptyValue = true
 }
 
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+	config.ShouldNotDereference = true
+}
+
 // WithAppendSlice will make merge append slices instead of overwriting it.
 func WithAppendSlice(config *Config) {
 	config.AppendSlice = true
@@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) {
 
 func merge(dst, src interface{}, opts ...func(*Config)) error {
 	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
-		return ErrNonPointerAgument
+		return ErrNonPointerArgument
 	}
 	var (
 		vDst, vSrc reflect.Value
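The `WithoutDereference` option added above changes how pointer fields are judged empty: `isEmptyValue` now stops at a non-nil pointer instead of following it, so a pointer to a zero value is no longer treated as unset. A minimal sketch of the difference, assuming this vendored mergo revision (v0.3.15); the `opts` struct is hypothetical:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// opts is a hypothetical struct with a pointer field, for illustration.
type opts struct {
	Retries *int
}

// newDst builds a destination whose Retries was explicitly set to 0.
func newDst() opts {
	zero := 0
	return opts{Retries: &zero}
}

func main() {
	five := 5
	src := opts{Retries: &five} // defaults to merge in

	// Default behavior dereferences the pointer: *Retries == 0 looks
	// empty, so the default 5 overwrites the explicit 0.
	d1 := newDst()
	_ = mergo.Merge(&d1, src)
	fmt.Println(*d1.Retries) // expected: 5

	// WithoutDereference: a non-nil pointer is never considered empty,
	// so the explicitly configured 0 survives the merge.
	d2 := newDst()
	_ = mergo.Merge(&d2, src, mergo.WithoutDereference)
	fmt.Println(*d2.Retries) // expected: 0
}

Relatedly, the new map branch above ("ensure that all keys in dst are deleted if they are not in src") means that merges configured with `WithOverwriteWithEmptyValue` now also remove destination map keys that the source lacks, instead of leaving them behind.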
11
vendor/github.com/imdario/mergo/mergo.go
generated
vendored
@@ -20,7 +20,7 @@ var (
 	ErrNotSupported                = errors.New("only structs, maps, and slices are supported")
 	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
 	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
-	ErrNonPointerAgument           = errors.New("dst must be a pointer")
+	ErrNonPointerArgument          = errors.New("dst must be a pointer")
 )
 
 // During deepMerge, must keep track of checks that are
@@ -28,13 +28,13 @@ var (
 // checks in progress are true when it reencounters them.
 // Visited are stored in a map indexed by 17 * a1 + a2;
 type visit struct {
-	ptr  uintptr
 	typ  reflect.Type
 	next *visit
+	ptr  uintptr
 }
 
 // From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
 	switch v.Kind() {
 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
 		return v.Len() == 0
@@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
 		if v.IsNil() {
 			return true
 		}
-		return isEmptyValue(v.Elem())
+		if shouldDereference {
+			return isEmptyValue(v.Elem(), shouldDereference)
+		}
+		return false
 	case reflect.Func:
 		return v.IsNil()
 	case reflect.Invalid:
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
42
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
@ -251,7 +251,7 @@ func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
|
|||||||
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
||||||
// OptCreateDeterministic or OptCreateWithID.
|
// OptCreateDeterministic or OptCreateWithID.
|
||||||
//
|
//
|
||||||
// By default, the image creation time is set to time.Now(). To override this, consider using
|
// By default, the image creation time is set to the current time. To override this, consider using
|
||||||
// OptCreateDeterministic or OptCreateWithTime.
|
// OptCreateDeterministic or OptCreateWithTime.
|
||||||
//
|
//
|
||||||
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
||||||
@ -296,7 +296,7 @@ func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) {
|
|||||||
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
||||||
// OptCreateDeterministic or OptCreateWithID.
|
// OptCreateDeterministic or OptCreateWithID.
|
||||||
//
|
//
|
||||||
// By default, the image creation time is set to time.Now(). To override this, consider using
|
// By default, the image creation time is set to the current time. To override this, consider using
|
||||||
// OptCreateDeterministic or OptCreateWithTime.
|
// OptCreateDeterministic or OptCreateWithTime.
|
||||||
//
|
//
|
||||||
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
||||||
@ -393,11 +393,13 @@ func OptAddWithTime(t time.Time) AddOpt {
|
|||||||
|
|
||||||
// AddObject adds a new data object and its descriptor into the specified SIF file.
|
// AddObject adds a new data object and its descriptor into the specified SIF file.
|
||||||
//
|
//
|
||||||
// By default, the image modification time is set to the current time. To override this, consider
|
// By default, the image modification time is set to the current time for non-deterministic images,
|
||||||
// using OptAddDeterministic or OptAddWithTime.
|
// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime.
|
||||||
func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
|
func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
|
||||||
ao := addOpts{
|
ao := addOpts{}
|
||||||
t: time.Now(),
|
|
||||||
|
if !f.isDeterministic() {
|
||||||
|
ao.t = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@ -449,11 +451,7 @@ func (f *FileImage) isLast(d *rawDescriptor) bool {
|
|||||||
func (f *FileImage) truncateAt(d *rawDescriptor) error {
|
func (f *FileImage) truncateAt(d *rawDescriptor) error {
|
||||||
start := d.Offset + d.Size - d.SizeWithPadding
|
start := d.Offset + d.Size - d.SizeWithPadding
|
||||||
|
|
||||||
if err := f.rw.Truncate(start); err != nil {
|
return f.rw.Truncate(start)
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// deleteOpts accumulates object deletion options.
|
// deleteOpts accumulates object deletion options.
|
||||||
@ -506,11 +504,14 @@ var errCompactNotImplemented = errors.New("compact not implemented for non-last
|
|||||||
// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
|
// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
|
||||||
// object deletion, use OptDeleteCompact.
|
// object deletion, use OptDeleteCompact.
|
||||||
//
|
//
|
||||||
// By default, the image modification time is set to time.Now(). To override this, consider using
|
// By default, the image modification time is set to the current time for non-deterministic images,
|
||||||
// OptDeleteDeterministic or OptDeleteWithTime.
|
// and unset otherwise. To override this, consider using OptDeleteDeterministic or
|
||||||
|
// OptDeleteWithTime.
|
||||||
func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
|
func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
|
||||||
do := deleteOpts{
|
do := deleteOpts{}
|
||||||
t: time.Now(),
|
|
||||||
|
if !f.isDeterministic() {
|
||||||
|
do.t = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
@ -596,11 +597,14 @@ var (
|
|||||||
|
|
||||||
// SetPrimPart sets the specified system partition to be the primary one.
|
// SetPrimPart sets the specified system partition to be the primary one.
|
||||||
//
|
//
|
||||||
// By default, the image/object modification times are set to time.Now(). To override this,
|
// By default, the image/object modification times are set to the current time for
|
||||||
// consider using OptSetDeterministic or OptSetWithTime.
|
// non-deterministic images, and unset otherwise. To override this, consider using
|
||||||
|
// OptSetDeterministic or OptSetWithTime.
|
||||||
func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
|
func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
|
||||||
so := setOpts{
|
so := setOpts{}
|
||||||
t: time.Now(),
|
|
||||||
|
if !f.isDeterministic() {
|
||||||
|
so.t = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
|
8
vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
generated
vendored
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
+// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved.
 // Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
 // Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
 // This software is licensed under a 3-clause BSD license. Please consult the
@@ -402,3 +402,9 @@ func (f *FileImage) DataSize() int64 { return f.h.DataSize }
 func (f *FileImage) GetHeaderIntegrityReader() io.Reader {
 	return f.h.GetIntegrityReader()
 }
+
+// isDeterministic returns true if the UUID and timestamps in the header of f are set to
+// deterministic values.
+func (f *FileImage) isDeterministic() bool {
+	return f.h.ID == uuid.Nil && f.CreatedAt().IsZero() && f.ModifiedAt().IsZero()
+}
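The new unexported `isDeterministic` helper ties the create.go changes together: an image created with `OptCreateDeterministic` carries a nil UUID and zero timestamps, so `AddObject`, `DeleteObject`, and `SetPrimPart` skip stamping `time.Now()` and rebuilds stay byte-for-byte reproducible. A rough sketch of the caller-visible effect, assuming sif v2.11.x; treat `CreateContainerAtPath` and the exact option spelling as assumptions from that API family, not as confirmed by this diff:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

// buildImage creates an empty SIF at path with deterministic header values
// (nil UUID, zero timestamps), so isDeterministic() is true and later
// AddObject/DeleteObject calls will not stamp time.Now().
func buildImage(path string) error {
	fi, err := sif.CreateContainerAtPath(path, sif.OptCreateDeterministic())
	if err != nil {
		return err
	}
	return fi.UnloadContainer()
}

func main() {
	// Two independent builds should be byte-identical.
	if err := buildImage("a.sif"); err != nil {
		panic(err)
	}
	if err := buildImage("b.sif"); err != nil {
		panic(err)
	}

	a, _ := os.ReadFile("a.sif")
	b, _ := os.ReadFile("b.sif")
	fmt.Println("reproducible:", bytes.Equal(a, b)) // expected: true
}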
12
vendor/modules.txt
vendored
@@ -123,7 +123,7 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.51.1-0.20230323135459-03a2cc01973c
+# github.com/containers/common v0.51.1-0.20230329113838-e27c30ee9b1b
 ## explicit; go 1.18
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
@@ -177,7 +177,7 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.24.3-0.20230314083015-0c6d07e02a9a
+# github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8
 ## explicit; go 1.18
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -284,7 +284,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.45.5-0.20230315220505-1c6287eea927
+# github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b
 ## explicit; go 1.17
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -486,7 +486,7 @@ github.com/go-openapi/runtime/yamlpc
 # github.com/go-openapi/spec v0.20.7
 ## explicit; go 1.13
 github.com/go-openapi/spec
-# github.com/go-openapi/strfmt v0.21.3
+# github.com/go-openapi/strfmt v0.21.5
 ## explicit; go 1.13
 github.com/go-openapi/strfmt
 # github.com/go-openapi/swag v0.22.3
@@ -573,7 +573,7 @@ github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-retryablehttp v0.7.2
 ## explicit; go 1.13
 github.com/hashicorp/go-retryablehttp
-# github.com/imdario/mergo v0.3.13
+# github.com/imdario/mergo v0.3.15
 ## explicit; go 1.13
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.1
@@ -841,7 +841,7 @@ github.com/stefanberger/go-pkcs11uri
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.11.0
+# github.com/sylabs/sif/v2 v2.11.1
 ## explicit; go 1.19
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635