Merge pull request #16578 from rhatdan/VENDOR
Update vendor of containers/(buildah, common, storage, image)
go.mod (14 lines changed)
@@ -11,13 +11,13 @@ require (
 	github.com/container-orchestrated-devices/container-device-interface v0.5.3
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.1.1
-	github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6
-	github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1
+	github.com/containers/buildah v1.28.1-0.20221122135051-c9f30d81ae37
+	github.com/containers/common v0.50.2-0.20221121202831-385be9a25125
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
+	github.com/containers/image/v5 v5.23.1-0.20221121174826-d8eb9dd60533
 	github.com/containers/ocicrypt v1.1.6
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
+	github.com/containers/storage v1.44.1-0.20221121144727-71fd3e87df7a
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3
@@ -78,8 +78,8 @@ require (
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
-	github.com/containerd/containerd v1.6.9 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect
+	github.com/containerd/containerd v1.6.10 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -133,7 +133,7 @@ require (
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/crypto v0.2.0 // indirect
+	golang.org/x/crypto v0.3.0 // indirect
 	golang.org/x/mod v0.6.0 // indirect
 	golang.org/x/tools v0.2.0 // indirect
 	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
go.sum (31 lines changed)
@@ -203,8 +203,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
-github.com/containerd/containerd v1.6.9 h1:IN/r8DUes/B5lEGTNfIiUkfZBtIQJGx2ai703dV6lRA=
-github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
+github.com/containerd/containerd v1.6.10 h1:8aiav7I2ZyQLbTlNMcBXyAU1FtFvp6VuyuW13qSd6Hk=
+github.com/containerd/containerd v1.6.10/go.mod h1:CVqfxdJ95PDgORwA219AwwLrREZgrTFybXu2HfMKRG0=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -236,8 +236,8 @@ github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
 github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
 github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
-github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0=
-github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -262,14 +262,14 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6 h1:6bFoF3QIUzza8NWAsHS1ZGDDEr+r5do46dXEbzkZb3Y=
-github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6/go.mod h1:skMuWv4FIebpsAFT7fBv2Ll0e0w2j71IUWCIrw9iTV0=
-github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1 h1:AmN1j+GzK4+fmtOljYVbxAEJeXKkPs3ofB/uxJt4SCU=
-github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1/go.mod h1:VBycGm+y123zhrbvGu5GykZiYJbtSqm7kN2tXCu2INM=
+github.com/containers/buildah v1.28.1-0.20221122135051-c9f30d81ae37 h1:XwZSJY+6fHAFp+6/TnG6IowKSBCR2BRn4iHgrMi4ks4=
+github.com/containers/buildah v1.28.1-0.20221122135051-c9f30d81ae37/go.mod h1:0HcSoS6BHXWzMKqtxY1L0gupebEX33oPC+X62lPi6+c=
+github.com/containers/common v0.50.2-0.20221121202831-385be9a25125 h1:xNFc3vQA1QaqTKvjy0E4B7maflTTKMSzCgsScdlqETg=
+github.com/containers/common v0.50.2-0.20221121202831-385be9a25125/go.mod h1:Oq+8c+9jzXe/57g9A95jXD4gWRc9T1TW0uC0WGm07sk=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77 h1:zLn8X9uD1jgjC7mTK/SwS1tmXTMLzfw1Lbc0Rn+6rFY=
-github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77/go.mod h1:T17ZmftW9GT2/gOv6b35kGqxB/caOnTn2k3dyh3VH34=
+github.com/containers/image/v5 v5.23.1-0.20221121174826-d8eb9dd60533 h1:VxrXA+okqhSOLOBtprMwbd1oJCUFTZowW3diaRmRGQw=
+github.com/containers/image/v5 v5.23.1-0.20221121174826-d8eb9dd60533/go.mod h1:V6DfpgeUBS0W5KrbZXKpY/DmVJVSPabfBXYUfGDO3EI=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -281,8 +281,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8 h1:MrQjgoKVQpD/16sfYe9C3T3y2gLvfBPADMFQ7Oq93zo=
-github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
+github.com/containers/storage v1.44.1-0.20221121144727-71fd3e87df7a h1:Kds8yAenoKQ7d95T+2oOfnLJpxPAwG9grUf0lIA4JJs=
+github.com/containers/storage v1.44.1-0.20221121144727-71fd3e87df7a/go.mod h1:pYkSXaKIGAuEQmIf/melI5wbS/JBM++6Xp4JuVTqY7U=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -615,7 +615,6 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -729,7 +728,7 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls=
+github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1017,8 +1016,8 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
-golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go (generated, vendored; 48 lines changed)
@@ -49,6 +49,7 @@ type options struct {
 	missedPrioritizedFiles *[]string
 	compression            Compression
 	ctx                    context.Context
+	minChunkSize           int
 }
 
 type Option func(o *options) error
@@ -63,6 +64,7 @@ func WithChunkSize(chunkSize int) Option {
 
 // WithCompressionLevel option specifies the gzip compression level.
 // The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
 // See also: https://godoc.org/compress/gzip#pkg-constants
 func WithCompressionLevel(level int) Option {
 	return func(o *options) error {
@@ -113,6 +115,18 @@ func WithContext(ctx context.Context) Option {
 	}
 }
 
+// WithMinChunkSize option specifies the minimal number of bytes of data
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files
+// and it hopefully leads to smaller result blob.
+// NOTE: This adds a TOC property that old reader doesn't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+	return func(o *options) error {
+		o.minChunkSize = minChunkSize
+		return nil
+	}
+}
+
 // Blob is an eStargz blob.
 type Blob struct {
 	io.ReadCloser
@@ -180,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 	if err != nil {
 		return nil, err
 	}
-	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
+	var tarParts [][]*entry
+	if opts.minChunkSize > 0 {
+		// Each entry needs to know the size of the current gzip stream so they
+		// cannot be processed in parallel.
+		tarParts = [][]*entry{entries}
+	} else {
+		tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+	}
 	writers := make([]*Writer, len(tarParts))
 	payloads := make([]*os.File, len(tarParts))
 	var mu sync.Mutex
@@ -195,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 			}
 			sw := NewWriterWithCompressor(esgzFile, opts.compression)
 			sw.ChunkSize = opts.chunkSize
+			sw.MinChunkSize = opts.minChunkSize
+			if sw.needsOpenGzEntries == nil {
+				sw.needsOpenGzEntries = make(map[string]struct{})
+			}
+			for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+				sw.needsOpenGzEntries[f] = struct{}{}
+			}
 			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
 				return err
 			}
@@ -209,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 		rErr = err
 		return nil, err
 	}
-	tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
+	tocAndFooter, tocDgst, err := closeWithCombine(writers...)
 	if err != nil {
 		rErr = err
 		return nil, err
@@ -252,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 // Writers doesn't write TOC and footer to the underlying writers so they can be
 // combined into a single eStargz and tocAndFooter returned by this function can
 // be appended at the tail of that combined blob.
-func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
 	if len(ws) == 0 {
 		return nil, "", fmt.Errorf("at least one writer must be passed")
 	}
@@ -395,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader {
 
 func importTar(in io.ReaderAt) (*tarFile, error) {
 	tf := &tarFile{}
-	pw, err := newCountReader(in)
+	pw, err := newCountReadSeeker(in)
 	if err != nil {
 		return nil, fmt.Errorf("failed to make position watcher: %w", err)
 	}
@@ -571,19 +599,19 @@ func (tf *tempFiles) cleanupAll() error {
 	return errorutil.Aggregate(allErr)
 }
 
-func newCountReader(r io.ReaderAt) (*countReader, error) {
+func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
 	pos := int64(0)
-	return &countReader{r: r, cPos: &pos}, nil
+	return &countReadSeeker{r: r, cPos: &pos}, nil
 }
 
-type countReader struct {
+type countReadSeeker struct {
 	r    io.ReaderAt
 	cPos *int64
 
 	mu sync.Mutex
 }
 
-func (cr *countReader) Read(p []byte) (int, error) {
+func (cr *countReadSeeker) Read(p []byte) (int, error) {
 	cr.mu.Lock()
 	defer cr.mu.Unlock()
 
@@ -594,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) {
 	return n, err
 }
 
-func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
+func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	cr.mu.Lock()
 	defer cr.mu.Unlock()
 
@@ -615,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
 	return offset, nil
 }
 
-func (cr *countReader) currentPos() int64 {
+func (cr *countReadSeeker) currentPos() int64 {
 	cr.mu.Lock()
 	defer cr.mu.Unlock()
 
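The new WithMinChunkSize option above is additive, so existing callers of Build keep working unchanged. A minimal usage sketch, assuming a plain tar file on disk; the helper name, path handling, and 64 KiB threshold are illustrative, not taken from the commit:

package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// buildWithMinChunk converts a plain tar into an eStargz blob, asking the
// writer to keep at least 64 KiB of data per gzip stream via the new
// WithMinChunkSize option so that many small files share one stream.
func buildWithMinChunk(tarPath string) (*estargz.Blob, error) {
	f, err := os.Open(tarPath)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	sr := io.NewSectionReader(f, 0, fi.Size())
	// NOTE: as the option's comment says, the resulting TOC carries a
	// property (InnerOffset) that older eStargz readers do not understand.
	return estargz.Build(sr, estargz.WithMinChunkSize(64*1024))
}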
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go (generated, vendored; 224 lines changed)
@@ -150,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
 			allErr = append(allErr, err)
 			continue
 		}
-		if tocSize <= 0 {
+		if tocOffset >= 0 && tocSize <= 0 {
 			tocSize = sr.Size() - tocOffset - fSize
 		}
-		if tocSize < int64(len(maybeTocBytes)) {
+		if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
 			maybeTocBytes = maybeTocBytes[:tocSize]
 		}
 		r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
@@ -207,8 +207,16 @@ func (r *Reader) initFields() error {
 	uname := map[int]string{}
 	gname := map[int]string{}
 	var lastRegEnt *TOCEntry
-	for _, ent := range r.toc.Entries {
+	var chunkTopIndex int
+	for i, ent := range r.toc.Entries {
 		ent.Name = cleanEntryName(ent.Name)
+		switch ent.Type {
+		case "reg", "chunk":
+			if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+				chunkTopIndex = i
+			}
+			ent.chunkTopIndex = chunkTopIndex
+		}
 		if ent.Type == "reg" {
 			lastRegEnt = ent
 		}
@@ -294,7 +302,7 @@ func (r *Reader) initFields() error {
 		if e.isDataType() {
 			e.nextOffset = lastOffset
 		}
-		if e.Offset != 0 {
+		if e.Offset != 0 && e.InnerOffset == 0 {
 			lastOffset = e.Offset
 		}
 	}
@@ -488,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
 //
 // Name must be absolute path or one that is relative to root.
 func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
+	fr, err := r.newFileReader(name)
+	if err != nil {
+		return nil, err
+	}
+	return io.NewSectionReader(fr, 0, fr.size), nil
+}
+
+func (r *Reader) newFileReader(name string) (*fileReader, error) {
 	name = cleanEntryName(name)
 	ent, ok := r.Lookup(name)
 	if !ok {
@@ -505,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
 			Err:  errors.New("not a regular file"),
 		}
 	}
-	fr := &fileReader{
+	return &fileReader{
 		r:    r,
 		size: ent.Size,
 		ents: r.getChunks(ent),
+	}, nil
+}
+
+func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
+	fr, err := r.newFileReader(name)
+	if err != nil {
+		return nil, err
 	}
+	fr.preRead = preRead
 	return io.NewSectionReader(fr, 0, fr.size), nil
 }
 
@@ -521,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
 }
 
 type fileReader struct {
 	r    *Reader
 	size int64
 	ents []*TOCEntry // 1 or more reg/chunk entries
+	preRead func(*TOCEntry, io.Reader) error
 }
 
 func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
@@ -578,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
 		return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
 	}
 	defer dr.Close()
-	if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
-		return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
+
+	if fr.preRead == nil {
+		if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
+			return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
+		}
+		return io.ReadFull(dr, p)
 	}
-	return io.ReadFull(dr, p)
+
+	var retN int
+	var retErr error
+	var found bool
+	var nr int64
+	for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
+		if !e.isDataType() {
+			continue
+		}
+		if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
+			break
+		}
+		if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
+			return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
+		}
+		nr = e.InnerOffset
+		if e == ent {
+			found = true
+			if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
+				return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
+			}
+			retN, retErr = io.ReadFull(dr, p)
+			nr += off + int64(retN)
+			continue
+		}
+		cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
+		if err := fr.preRead(e, cr); err != nil {
+			return 0, fmt.Errorf("failed to pre read: %w", err)
+		}
+		nr += cr.n
+	}
+	if !found {
+		return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
+	}
+	return retN, retErr
 }
 
 // A Writer writes stargz files.
@@ -599,11 +662,20 @@ type Writer struct {
 	lastGroupname map[int]string
 	compressor    Compressor
 
+	uncompressedCounter *countWriteFlusher
+
 	// ChunkSize optionally controls the maximum number of bytes
 	// of data of a regular file that can be written in one gzip
 	// stream before a new gzip stream is started.
 	// Zero means to use a default, currently 4 MiB.
 	ChunkSize int
+
+	// MinChunkSize optionally controls the minimum number of bytes
+	// of data must be written in one gzip stream before a new gzip
+	// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+	MinChunkSize int
+
+	needsOpenGzEntries map[string]struct{}
 }
 
 // currentCompressionWriter writes to the current w.gz field, which can
@@ -646,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse footer: %w", err)
 	}
+	if blobPayloadSize < 0 {
+		blobPayloadSize = sr.Size()
+	}
 	return c.Reader(io.LimitReader(sr, blobPayloadSize))
 }
 
@@ -672,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
 	bw := bufio.NewWriter(w)
 	cw := &countWriter{w: bw}
 	return &Writer{
 		bw:         bw,
 		cw:         cw,
 		toc:        &JTOC{Version: 1},
 		diffHash:   sha256.New(),
 		compressor: c,
+		uncompressedCounter: &countWriteFlusher{},
 	}
 }
 
@@ -717,6 +793,20 @@ func (w *Writer) closeGz() error {
 	return nil
 }
 
+func (w *Writer) flushGz() error {
+	if w.closed {
+		return errors.New("flush on closed Writer")
+	}
+	if w.gz != nil {
+		if f, ok := w.gz.(interface {
+			Flush() error
+		}); ok {
+			return f.Flush()
+		}
+	}
+	return nil
+}
+
 // nameIfChanged returns name, unless it was the already the value of (*mp)[id],
 // in which case it returns the empty string.
 func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
@@ -736,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
 func (w *Writer) condOpenGz() (err error) {
 	if w.gz == nil {
 		w.gz, err = w.compressor.Writer(w.cw)
+		if w.gz != nil {
+			w.gz = w.uncompressedCounter.register(w.gz)
+		}
 	}
 	return
 }
@@ -784,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 	if lossless {
 		tr.RawAccounting = true
 	}
+	prevOffset := w.cw.n
+	var prevOffsetUncompressed int64
 	for {
 		h, err := tr.Next()
 		if err == io.EOF {
@@ -883,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 			totalSize := ent.Size // save it before we destroy ent
 			tee := io.TeeReader(tr, payloadDigest.Hash())
 			for written < totalSize {
-				if err := w.closeGz(); err != nil {
-					return err
-				}
-
 				chunkSize := int64(w.chunkSize())
 				remain := totalSize - written
 				if remain < chunkSize {
@@ -894,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 				} else {
 					ent.ChunkSize = chunkSize
 				}
-				ent.Offset = w.cw.n
+
+				// We flush the underlying compression writer here to correctly calculate "w.cw.n".
+				if err := w.flushGz(); err != nil {
+					return err
+				}
+				if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+					if err := w.closeGz(); err != nil {
+						return err
+					}
+					ent.Offset = w.cw.n
+					prevOffset = ent.Offset
+					prevOffsetUncompressed = w.uncompressedCounter.n
+				} else {
+					ent.Offset = prevOffset
+					ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+				}
+
 				ent.ChunkOffset = written
 				chunkDigest := digest.Canonical.Digester()
 
@@ -940,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
 	return err
 }
 
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+	if ent.Type != "reg" {
+		return false
+	}
+	if w.needsOpenGzEntries == nil {
+		return false
+	}
+	_, ok := w.needsOpenGzEntries[ent.Name]
+	return ok
+}
+
 // DiffID returns the SHA-256 of the uncompressed tar bytes.
 // It is only valid to call DiffID after Close.
 func (w *Writer) DiffID() string {
@@ -956,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
 }
 
 func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if tocOff < 0 {
+		// This means that TOC isn't contained in the blob.
+		// We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from
+		// the external location.
+		start := time.Now()
+		toc, tocDgst, err := d.ParseTOC(nil)
+		if err != nil {
+			return nil, err
+		}
+		if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+			opts.telemetry.GetTocLatency(start)
+		}
+		if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+			opts.telemetry.DeserializeTocLatency(start)
+		}
+		return &Reader{
+			sr:           sr,
+			toc:          toc,
+			tocDigest:    tocDgst,
+			decompressor: d,
+		}, nil
+	}
 	if len(tocBytes) > 0 {
 		start := time.Now()
 		toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
@@ -1021,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) {
 	return
 }
 
+type countWriteFlusher struct {
+	io.WriteCloser
+	n int64
+}
+
+func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
+	wc.WriteCloser = w
+	return wc
+}
+
+func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
+	n, err = wc.WriteCloser.Write(p)
+	wc.n += int64(n)
+	return
+}
+
+func (wc *countWriteFlusher) Flush() error {
+	if f, ok := wc.WriteCloser.(interface {
+		Flush() error
+	}); ok {
+		return f.Flush()
+	}
+	return nil
+}
+
+func (wc *countWriteFlusher) Close() error {
+	err := wc.WriteCloser.Close()
+	wc.WriteCloser = nil
+	return err
+}
+
 // isGzip reports whether br is positioned right before an upcoming gzip stream.
 // It does not consume any bytes from br.
 func isGzip(br *bufio.Reader) bool {
@@ -1039,3 +1210,14 @@ func positive(n int64) int64 {
 	}
 	return n
 }
+
+type countReader struct {
+	r io.Reader
+	n int64
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+	n, err = cr.r.Read(p)
+	cr.n += int64(n)
+	return
+}
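OpenFileWithPreReader, added above, lets a caller observe sibling chunks that share a gzip stream with the requested file while that stream is being decompressed. A rough sketch of how a consumer might cache those pre-read chunks, assuming an *estargz.Reader obtained from Open; the helper name and the cache keying scheme are illustrative, not part of the commit:

package main

import (
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// readWithPreRead reads one file and, as a side effect, captures any sibling
// chunks that were decompressed from the same gzip stream on the way there.
func readWithPreRead(r *estargz.Reader, name string) ([]byte, map[string][]byte, error) {
	cache := map[string][]byte{}
	sr, err := r.OpenFileWithPreReader(name, func(e *estargz.TOCEntry, cr io.Reader) error {
		b, err := io.ReadAll(cr)
		if err != nil {
			return err
		}
		// Key by entry name and chunk offset so multi-chunk files stay distinct.
		cache[fmt.Sprintf("%s@%d", e.Name, e.ChunkOffset)] = b
		return nil
	})
	if err != nil {
		return nil, nil, err
	}
	data, err := io.ReadAll(io.NewSectionReader(sr, 0, sr.Size()))
	return data, cache, err
}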
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go (generated, vendored; 2 lines changed)
@@ -60,7 +60,7 @@ type GzipCompressor struct {
 	compressionLevel int
 }
 
-func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
 	return gzip.NewWriterLevel(w, gc.compressionLevel)
 }
 
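Compressor.Writer now returns WriteFlushCloser instead of io.WriteCloser. The interface definition itself is not part of this diff; judging by the flushGz helper added in estargz.go, it is presumably io.WriteCloser plus a Flush method, roughly:

// Assumed shape of WriteFlushCloser, inferred from how flushGz uses w.gz;
// the real definition lives elsewhere in the estargz package.
type WriteFlushCloser interface {
	io.WriteCloser
	Flush() error
}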
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go (generated, vendored; 627 lines changed)
@@ -31,6 +31,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -44,21 +45,27 @@ import (
 	digest "github.com/opencontainers/go-digest"
 )
 
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
 // TestingController is Compression with some helper methods necessary for testing.
 type TestingController interface {
 	Compression
-	CountStreams(*testing.T, []byte) int
+	TestStreams(t *testing.T, b []byte, streams []int64)
 	DiffIDOf(*testing.T, []byte) string
 	String() string
 }
 
 // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them.
-func CompressionTestSuite(t *testing.T, controllers ...TestingController) {
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
 	t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
 	t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
 	t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
 }
 
+type TestingControllerFactory func() TestingController
+
 const (
 	uncompressedType int = iota
 	gzipType
@@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
 
 // testBuild tests the resulting stargz blob built by this pkg has the same
 // contents as the normal stargz blob.
-func testBuild(t *testing.T, controllers ...TestingController) {
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
 	tests := []struct {
 		name      string
 		chunkSize int
-		in        []tarEntry
+		minChunkSize []int
+		in           []tarEntry
 	}{
 		{
 			name: "regfiles and directories",
@@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) {
 			),
 		},
 		{
 			name:      "various files",
 			chunkSize: 4,
+			minChunkSize: []int{0, 64000},
 			in: tarOf(
 				file("baz.txt", "bazbazbazbazbazbazbaz"),
-				file("foo.txt", "a"),
+				file("foo1.txt", "a"),
+				file("bar/foo2.txt", "b"),
+				file("foo3.txt", "c"),
 				symlink("barlink", "test/bar.txt"),
 				dir("test/"),
 				dir("dev/"),
@@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) {
 	}
 	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
 		for _, srcCompression := range srcCompressions {
 			srcCompression := srcCompression
-			for _, cl := range controllers {
-				cl := cl
+			for _, newCL := range controllers {
+				newCL := newCL
 				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
 					srcTarFormat := srcTarFormat
 					for _, prefix := range allowedPrefix {
 						prefix := prefix
-						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
 							tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
 							// Test divideEntries()
 							entries, err := sortEntries(tarBlob, nil, nil) // identical order
 							if err != nil {
 								t.Fatalf("failed to parse tar: %v", err)
 							}
 							var merged []*entry
 							for _, part := range divideEntries(entries, 4) {
 								merged = append(merged, part...)
 							}
 							if !reflect.DeepEqual(entries, merged) {
 								for _, e := range entries {
 									t.Logf("Original: %v", e.header)
 								}
 								for _, e := range merged {
 									t.Logf("Merged: %v", e.header)
 								}
 								t.Errorf("divided entries couldn't be merged")
 								return
 							}
 
 							// Prepare sample data
+							cl1 := newCL()
 							wantBuf := new(bytes.Buffer)
-							sw := NewWriterWithCompressor(wantBuf, cl)
+							sw := NewWriterWithCompressor(wantBuf, cl1)
+							sw.MinChunkSize = minChunkSize
 							sw.ChunkSize = tt.chunkSize
 							if err := sw.AppendTar(tarBlob); err != nil {
 								t.Fatalf("failed to append tar to want stargz: %v", err)
 							}
 							if _, err := sw.Close(); err != nil {
 								t.Fatalf("failed to prepare want stargz: %v", err)
 							}
 							wantData := wantBuf.Bytes()
 							want, err := Open(io.NewSectionReader(
 								bytes.NewReader(wantData), 0, int64(len(wantData))),
-								WithDecompressors(cl),
+								WithDecompressors(cl1),
 							)
 							if err != nil {
 								t.Fatalf("failed to parse the want stargz: %v", err)
 							}
 
 							// Prepare testing data
-							rc, err := Build(compressBlob(t, tarBlob, srcCompression),
-								WithChunkSize(tt.chunkSize), WithCompression(cl))
+							var opts []Option
+							if minChunkSize > 0 {
+								opts = append(opts, WithMinChunkSize(minChunkSize))
+							}
+							cl2 := newCL()
+							rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+								append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
 							if err != nil {
 								t.Fatalf("failed to build stargz: %v", err)
 							}
 							defer rc.Close()
 							gotBuf := new(bytes.Buffer)
 							if _, err := io.Copy(gotBuf, rc); err != nil {
 								t.Fatalf("failed to copy built stargz blob: %v", err)
 							}
 							gotData := gotBuf.Bytes()
 							got, err := Open(io.NewSectionReader(
 								bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
-								WithDecompressors(cl),
+								WithDecompressors(cl2),
 							)
 							if err != nil {
 								t.Fatalf("failed to parse the got stargz: %v", err)
 							}
 
 							// Check DiffID is properly calculated
 							rc.Close()
 							diffID := rc.DiffID()
-							wantDiffID := cl.DiffIDOf(t, gotData)
+							wantDiffID := cl2.DiffIDOf(t, gotData)
 							if diffID.String() != wantDiffID {
 								t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
 							}
 
 							// Compare as stargz
-							if !isSameVersion(t, cl, wantData, gotData) {
+							if !isSameVersion(t, cl1, wantData, cl2, gotData) {
 								t.Errorf("built stargz hasn't same json")
 								return
 							}
 							if !isSameEntries(t, want, got) {
 								t.Errorf("built stargz isn't same as the original")
 								return
 							}
 
 							// Compare as tar.gz
-							if !isSameTarGz(t, cl, wantData, gotData) {
+							if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
 								t.Errorf("built stargz isn't same tar.gz")
 								return
 							}
 						})
+						}
 					}
 				}
 			}
 		}
 	}
@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
|
func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
|
||||||
aGz, err := controller.Reader(bytes.NewReader(a))
|
aGz, err := cla.Reader(bytes.NewReader(a))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to read A")
|
t.Fatalf("failed to read A")
|
||||||
}
|
}
|
||||||
defer aGz.Close()
|
defer aGz.Close()
|
||||||
bGz, err := controller.Reader(bytes.NewReader(b))
|
bGz, err := clb.Reader(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to read B")
|
t.Fatalf("failed to read B")
|
||||||
}
|
}
|
||||||
@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
|
func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
|
||||||
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
|
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to parse A: %v", err)
|
t.Fatalf("failed to parse A: %v", err)
|
||||||
}
|
}
|
||||||
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
|
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to parse B: %v", err)
|
t.Fatalf("failed to parse B: %v", err)
|
||||||
}
|
}
|
||||||
@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool {
|
|||||||
a.GID == b.GID &&
|
a.GID == b.GID &&
|
||||||
a.Uname == b.Uname &&
|
a.Uname == b.Uname &&
|
||||||
a.Gname == b.Gname &&
|
a.Gname == b.Gname &&
|
||||||
(a.Offset > 0) == (b.Offset > 0) &&
|
(a.Offset >= 0) == (b.Offset >= 0) &&
|
||||||
(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
|
(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
|
||||||
a.DevMajor == b.DevMajor &&
|
a.DevMajor == b.DevMajor &&
|
||||||
a.DevMinor == b.DevMinor &&
|
a.DevMinor == b.DevMinor &&
|
||||||
@@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
 const chunkSize = 3

 // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
-type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController)
+type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)

 // testDigestAndVerify runs specified checks against sample stargz blobs.
-func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
+func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
 	tests := []struct {
 		name    string
 		tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
 		checks  []check
+		minChunkSize []int
 	}{
 		{
 			name: "no-regfile",
@@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 					regDigest(t, "test/bar.txt", "bbb", dgstMap),
 				)
 			},
+			minChunkSize: []int{0, 64000},
 			checks: []check{
 				checkStargzTOC,
 				checkVerifyTOC,
@@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 				},
 			},
 		},
 		{
 			name: "with-non-regfiles",
+			minChunkSize: []int{0, 64000},
 			tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
 				return tarOf(
 					regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
 					regDigest(t, "foo.txt", "a", dgstMap),
+					regDigest(t, "bar/foo2.txt", "b", dgstMap),
+					regDigest(t, "foo3.txt", "c", dgstMap),
 					symlink("barlink", "test/bar.txt"),
 					dir("test/"),
 					regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
@@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
 					file("baz.txt", "bazbazbazbazbazbazbaz"),
 					file("foo.txt", "a"),
+					file("bar/foo2.txt", "b"),
+					file("foo3.txt", "c"),
 					symlink("barlink", "test/bar.txt"),
 					dir("test/"),
 					file("test/bar.txt", "testbartestbar"),
@@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 	}

 	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
 		for _, srcCompression := range srcCompressions {
 			srcCompression := srcCompression
-			for _, cl := range controllers {
-				cl := cl
+			for _, newCL := range controllers {
+				newCL := newCL
 				for _, prefix := range allowedPrefix {
 					prefix := prefix
 					for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
 						srcTarFormat := srcTarFormat
-						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
+						for _, minChunkSize := range tt.minChunkSize {
+						minChunkSize := minChunkSize
+						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
 							// Get original tar file and chunk digests
 							dgstMap := make(map[string]digest.Digest)
 							tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)

+							cl := newCL()
 							rc, err := Build(compressBlob(t, tarBlob, srcCompression),
 								WithChunkSize(chunkSize), WithCompression(cl))
 							if err != nil {
 								t.Fatalf("failed to convert stargz: %v", err)
 							}
 							tocDigest := rc.TOCDigest()
 							defer rc.Close()
 							buf := new(bytes.Buffer)
 							if _, err := io.Copy(buf, rc); err != nil {
 								t.Fatalf("failed to copy built stargz blob: %v", err)
 							}
 							newStargz := buf.Bytes()
 							// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
 							dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})

 							for _, check := range tt.checks {
-								check(t, newStargz, tocDigest, dgstMap, cl)
+								check(t, newStargz, tocDigest, dgstMap, cl, newCL)
 							}
 						})
+						}
 					}
 				}
 			}
 		}
 	}
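The loop above now iterates over compressor factories instead of shared compressor instances, so every subtest builds with a fresh controller (`cl := newCL()`) and also sweeps the new minChunkSize dimension. A minimal, self-contained sketch of that factory pattern, with the interface reduced to a placeholder (only the TestingControllerFactory shape is taken from the diff; the rest is illustrative):

```go
package main

import "fmt"

// Illustrative stand-in: the real TestingController in estargz carries
// Compressor/Decompressor behaviour; here it is reduced to a name.
type TestingController interface{ String() string }

// TestingControllerFactory is the factory type the rewritten tests range
// over: calling it yields a fresh controller per t.Run invocation.
type TestingControllerFactory func() TestingController

type gzipController struct{ level int }

func (c gzipController) String() string { return fmt.Sprintf("gzip(level=%d)", c.level) }

func main() {
	controllers := []TestingControllerFactory{
		func() TestingController { return gzipController{level: 1} },
		func() TestingController { return gzipController{level: 9} },
	}
	for _, newCL := range controllers {
		cl := newCL() // fresh instance, as in `cl := newCL()` above
		fmt.Println(cl)
	}
}
```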
@@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 // checkStargzTOC checks the TOC JSON of the passed stargz has the expected
 // digest and contains valid chunks. It walks all entries in the stargz and
 // checks all chunk digests stored to the TOC JSON match the actual contents.
-func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 	sgz, err := Open(
 		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
 		WithDecompressors(controller),
@@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
 // checkVerifyTOC checks the verification works for the TOC JSON of the passed
 // stargz. It walks all entries in the stargz and checks the verifications for
 // all chunks work.
-func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 	sgz, err := Open(
 		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
 		WithDecompressors(controller),
@@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
 // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
 // detected during the verification and the verification returns an error.
 func checkVerifyInvalidTOCEntryFail(filename string) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 		funcs := map[string]rewriteFunc{
 			"lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
 				var found bool
@@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 // checkVerifyInvalidStargzFail checks if the verification detects that the
 // given stargz file doesn't match to the expected digest and returns error.
 func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
-		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		cl := newController()
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
 		if err != nil {
 			t.Fatalf("failed to convert stargz: %v", err)
 		}
@@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {

 		sgz, err := Open(
 			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
-			WithDecompressors(controller),
+			WithDecompressors(cl),
 		)
 		if err != nil {
 			t.Fatalf("failed to parse converted stargz: %v", err)
@@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
 // checkVerifyBrokenContentFail checks if the verifier detects broken contents
 // that doesn't match to the expected digest and returns error.
 func checkVerifyBrokenContentFail(filename string) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 		// Parse stargz file
 		sgz, err := Open(
 			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
 	}

 	// Decode the TOC JSON
-	tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+	var tocReader io.Reader
+	if tocOffset >= 0 {
+		tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+	}
 	decodedJTOC, _, err = controller.ParseTOC(tocReader)
 	if err != nil {
 		return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
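parseStargz now only slices out a TOC section when ParseFooter reported a non-negative offset; with an externally stored TOC the reader stays nil. A small, self-contained illustration of the section-reader arithmetic used above (a toy blob layout, not the real footer format):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Toy layout: payload | TOC | footer. Offsets are hard-coded purely to
	// show the sgz.Size()-tocOffset-footerSize arithmetic from the hunk.
	blob := []byte("PAYLOAD" + "TOC" + "FOOT")
	sgz := io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob)))

	tocOffset, footerSize := int64(7), int64(4)
	var tocReader io.Reader
	if tocOffset >= 0 { // a negative offset would mean the TOC lives outside the blob
		tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-footerSize)
	}
	toc, _ := io.ReadAll(tocReader)
	fmt.Printf("%s\n", toc) // prints: TOC
}
```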
@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
|
|||||||
return decodedJTOC, tocOffset, nil
|
return decodedJTOC, tocOffset, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
|
||||||
const content = "Some contents"
|
const content = "Some contents"
|
||||||
invalidUtf8 := "\xff\xfe\xfd"
|
invalidUtf8 := "\xff\xfe\xfd"
|
||||||
|
|
||||||
xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
|
xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
|
||||||
sampleOwner := owner{uid: 50, gid: 100}
|
sampleOwner := owner{uid: 50, gid: 100}
|
||||||
|
|
||||||
|
data64KB := randomContents(64000)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
chunkSize int
|
chunkSize int
|
||||||
in []tarEntry
|
minChunkSize int
|
||||||
want []stargzCheck
|
in []tarEntry
|
||||||
wantNumGz int // expected number of streams
|
want []stargzCheck
|
||||||
|
wantNumGz int // expected number of streams
|
||||||
|
|
||||||
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
|
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
|
||||||
wantFailOnLossLess bool
|
wantFailOnLossLess bool
|
||||||
|
wantTOCVersion int // default = 1
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "empty",
|
name: "empty",
|
||||||
in: tarOf(),
|
in: tarOf(),
|
||||||
wantNumGz: 2, // empty tar + TOC + footer
|
wantNumGz: 2, // (empty tar) + TOC + footer
|
||||||
wantNumGzLossLess: 3, // empty tar + TOC + footer
|
|
||||||
want: checks(
|
want: checks(
|
||||||
numTOCEntries(0),
|
numTOCEntries(0),
|
||||||
),
|
),
|
||||||
@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||||||
dir("foo/"),
|
dir("foo/"),
|
||||||
file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
|
file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
|
||||||
),
|
),
|
||||||
wantNumGz: 9,
|
wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
|
||||||
want: checks(
|
want: checks(
|
||||||
numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
|
numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
|
||||||
hasDir("foo/"),
|
hasDir("foo/"),
|
||||||
@ -1326,23 +1371,108 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||||||
mustSameEntry("foo/foo1", "foolink"),
|
mustSameEntry("foo/foo1", "foolink"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "several_files_in_chunk",
|
||||||
|
minChunkSize: 8000,
|
||||||
|
in: tarOf(
|
||||||
|
dir("foo/"),
|
||||||
|
file("foo/foo1", data64KB),
|
||||||
|
file("foo2", "bb"),
|
||||||
|
file("foo22", "ccc"),
|
||||||
|
dir("bar/"),
|
||||||
|
file("bar/bar.txt", "aaa"),
|
||||||
|
file("foo3", data64KB),
|
||||||
|
),
|
||||||
|
// NOTE: we assume that the compressed "data64KB" is still larger than 8KB
|
||||||
|
wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
|
||||||
|
want: checks(
|
||||||
|
numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
|
||||||
|
hasDir("foo/"),
|
||||||
|
hasDir("bar/"),
|
||||||
|
hasFileLen("foo/foo1", len(data64KB)),
|
||||||
|
hasFileLen("foo2", len("bb")),
|
||||||
|
hasFileLen("foo22", len("ccc")),
|
||||||
|
hasFileLen("bar/bar.txt", len("aaa")),
|
||||||
|
hasFileLen("foo3", len(data64KB)),
|
||||||
|
hasFileDigest("foo/foo1", digestFor(data64KB)),
|
||||||
|
hasFileDigest("foo2", digestFor("bb")),
|
||||||
|
hasFileDigest("foo22", digestFor("ccc")),
|
||||||
|
hasFileDigest("bar/bar.txt", digestFor("aaa")),
|
||||||
|
hasFileDigest("foo3", digestFor(data64KB)),
|
||||||
|
hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
|
||||||
|
hasFileContentsRange("foo/foo1", 0, data64KB),
|
||||||
|
hasFileContentsRange("foo2", 0, "bb"),
|
||||||
|
hasFileContentsRange("foo2", 1, "b"),
|
||||||
|
hasFileContentsRange("foo22", 0, "ccc"),
|
||||||
|
hasFileContentsRange("foo22", 1, "cc"),
|
||||||
|
hasFileContentsRange("foo22", 2, "c"),
|
||||||
|
hasFileContentsRange("bar/bar.txt", 0, "aaa"),
|
||||||
|
hasFileContentsRange("bar/bar.txt", 1, "aa"),
|
||||||
|
hasFileContentsRange("bar/bar.txt", 2, "a"),
|
||||||
|
hasFileContentsRange("foo3", 0, data64KB),
|
||||||
|
hasFileContentsRange("foo3", 1, data64KB[1:]),
|
||||||
|
hasFileContentsRange("foo3", 2, data64KB[2:]),
|
||||||
|
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
|
||||||
|
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "several_files_in_chunk_chunked",
|
||||||
|
minChunkSize: 8000,
|
||||||
|
chunkSize: 32000,
|
||||||
|
in: tarOf(
|
||||||
|
dir("foo/"),
|
||||||
|
file("foo/foo1", data64KB),
|
||||||
|
file("foo2", "bb"),
|
||||||
|
dir("bar/"),
|
||||||
|
file("foo3", data64KB),
|
||||||
|
),
|
||||||
|
// NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
|
||||||
|
wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
|
||||||
|
want: checks(
|
||||||
|
numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
|
||||||
|
hasDir("foo/"),
|
||||||
|
hasDir("bar/"),
|
||||||
|
hasFileLen("foo/foo1", len(data64KB)),
|
||||||
|
hasFileLen("foo2", len("bb")),
|
||||||
|
hasFileLen("foo3", len(data64KB)),
|
||||||
|
hasFileDigest("foo/foo1", digestFor(data64KB)),
|
||||||
|
hasFileDigest("foo2", digestFor("bb")),
|
||||||
|
hasFileDigest("foo3", digestFor(data64KB)),
|
||||||
|
hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
|
||||||
|
hasFileContentsRange("foo/foo1", 0, data64KB),
|
||||||
|
hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
|
||||||
|
hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
|
||||||
|
hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
|
||||||
|
hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
|
||||||
|
hasFileContentsRange("foo2", 0, "bb"),
|
||||||
|
hasFileContentsRange("foo2", 1, "b"),
|
||||||
|
hasFileContentsRange("foo3", 0, data64KB),
|
||||||
|
hasFileContentsRange("foo3", 1, data64KB[1:]),
|
||||||
|
hasFileContentsRange("foo3", 2, data64KB[2:]),
|
||||||
|
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
|
||||||
|
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
|
||||||
|
),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
for _, cl := range controllers {
|
for _, newCL := range controllers {
|
||||||
cl := cl
|
newCL := newCL
|
||||||
for _, prefix := range allowedPrefix {
|
for _, prefix := range allowedPrefix {
|
||||||
prefix := prefix
|
prefix := prefix
|
||||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||||
srcTarFormat := srcTarFormat
|
srcTarFormat := srcTarFormat
|
||||||
for _, lossless := range []bool{true, false} {
|
for _, lossless := range []bool{true, false} {
|
||||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
|
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
|
||||||
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
|
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
|
||||||
origTarDgstr := digest.Canonical.Digester()
|
origTarDgstr := digest.Canonical.Digester()
|
||||||
tr = io.TeeReader(tr, origTarDgstr.Hash())
|
tr = io.TeeReader(tr, origTarDgstr.Hash())
|
||||||
var stargzBuf bytes.Buffer
|
var stargzBuf bytes.Buffer
|
||||||
w := NewWriterWithCompressor(&stargzBuf, cl)
|
cl1 := newCL()
|
||||||
|
w := NewWriterWithCompressor(&stargzBuf, cl1)
|
||||||
w.ChunkSize = tt.chunkSize
|
w.ChunkSize = tt.chunkSize
|
||||||
|
w.MinChunkSize = tt.minChunkSize
|
||||||
if lossless {
|
if lossless {
|
||||||
err := w.AppendTarLossLess(tr)
|
err := w.AppendTarLossLess(tr)
|
||||||
if tt.wantFailOnLossLess {
|
if tt.wantFailOnLossLess {
|
||||||
@ -1366,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||||||
|
|
||||||
if lossless {
|
if lossless {
|
||||||
// Check if the result blob reserves original tar metadata
|
// Check if the result blob reserves original tar metadata
|
||||||
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
|
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("failed to decompress blob: %v", err)
|
t.Errorf("failed to decompress blob: %v", err)
|
||||||
return
|
return
|
||||||
@ -1385,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
diffID := w.DiffID()
|
diffID := w.DiffID()
|
||||||
wantDiffID := cl.DiffIDOf(t, b)
|
wantDiffID := cl1.DiffIDOf(t, b)
|
||||||
if diffID != wantDiffID {
|
if diffID != wantDiffID {
|
||||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||||
}
|
}
|
||||||
|
|
||||||
got := cl.CountStreams(t, b)
|
|
||||||
wantNumGz := tt.wantNumGz
|
|
||||||
if lossless && tt.wantNumGzLossLess > 0 {
|
|
||||||
wantNumGz = tt.wantNumGzLossLess
|
|
||||||
}
|
|
||||||
if got != wantNumGz {
|
|
||||||
t.Errorf("number of streams = %d; want %d", got, wantNumGz)
|
|
||||||
}
|
|
||||||
|
|
||||||
telemetry, checkCalled := newCalledTelemetry()
|
telemetry, checkCalled := newCalledTelemetry()
|
||||||
|
sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
|
||||||
r, err := Open(
|
r, err := Open(
|
||||||
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
|
sr,
|
||||||
WithDecompressors(cl),
|
WithDecompressors(cl1),
|
||||||
WithTelemetry(telemetry),
|
WithTelemetry(telemetry),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("stargz.Open: %v", err)
|
t.Fatalf("stargz.Open: %v", err)
|
||||||
}
|
}
|
||||||
if err := checkCalled(); err != nil {
|
wantTOCVersion := 1
|
||||||
|
if tt.wantTOCVersion > 0 {
|
||||||
|
wantTOCVersion = tt.wantTOCVersion
|
||||||
|
}
|
||||||
|
if r.toc.Version != wantTOCVersion {
|
||||||
|
t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
footerSize := cl1.FooterSize()
|
||||||
|
footerOffset := sr.Size() - footerSize
|
||||||
|
footer := make([]byte, footerSize)
|
||||||
|
if _, err := sr.ReadAt(footer, footerOffset); err != nil {
|
||||||
|
t.Errorf("failed to read footer: %v", err)
|
||||||
|
}
|
||||||
|
_, tocOffset, _, err := cl1.ParseFooter(footer)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to parse footer: %v", err)
|
||||||
|
}
|
||||||
|
if err := checkCalled(tocOffset >= 0); err != nil {
|
||||||
t.Errorf("telemetry failure: %v", err)
|
t.Errorf("telemetry failure: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wantNumGz := tt.wantNumGz
|
||||||
|
if lossless && tt.wantNumGzLossLess > 0 {
|
||||||
|
wantNumGz = tt.wantNumGzLossLess
|
||||||
|
}
|
||||||
|
streamOffsets := []int64{0}
|
||||||
|
prevOffset := int64(-1)
|
||||||
|
streams := 0
|
||||||
|
for _, e := range r.toc.Entries {
|
||||||
|
if e.Offset > prevOffset {
|
||||||
|
streamOffsets = append(streamOffsets, e.Offset)
|
||||||
|
prevOffset = e.Offset
|
||||||
|
streams++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
streams++ // TOC
|
||||||
|
if tocOffset >= 0 {
|
||||||
|
// toc is in the blob
|
||||||
|
streamOffsets = append(streamOffsets, tocOffset)
|
||||||
|
}
|
||||||
|
streams++ // footer
|
||||||
|
streamOffsets = append(streamOffsets, footerOffset)
|
||||||
|
if streams != wantNumGz {
|
||||||
|
t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("testing streams: %+v", streamOffsets)
|
||||||
|
cl1.TestStreams(t, b, streamOffsets)
|
||||||
|
|
||||||
for _, want := range tt.want {
|
for _, want := range tt.want {
|
||||||
want.check(t, r)
|
want.check(t, r)
|
||||||
}
|
}
|
||||||
@ -1422,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
|
type chunkInfo struct {
|
||||||
|
name string
|
||||||
|
data string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
|
||||||
var getFooterLatencyCalled bool
|
var getFooterLatencyCalled bool
|
||||||
var getTocLatencyCalled bool
|
var getTocLatencyCalled bool
|
||||||
var deserializeTocLatencyCalled bool
|
var deserializeTocLatencyCalled bool
|
||||||
@ -1430,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
|
|||||||
func(time.Time) { getFooterLatencyCalled = true },
|
func(time.Time) { getFooterLatencyCalled = true },
|
||||||
func(time.Time) { getTocLatencyCalled = true },
|
func(time.Time) { getTocLatencyCalled = true },
|
||||||
func(time.Time) { deserializeTocLatencyCalled = true },
|
func(time.Time) { deserializeTocLatencyCalled = true },
|
||||||
}, func() error {
|
}, func(needsGetTOC bool) error {
|
||||||
var allErr []error
|
var allErr []error
|
||||||
if !getFooterLatencyCalled {
|
if !getFooterLatencyCalled {
|
||||||
allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
|
allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
|
||||||
}
|
}
|
||||||
if !getTocLatencyCalled {
|
if needsGetTOC {
|
||||||
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
|
if !getTocLatencyCalled {
|
||||||
|
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if !deserializeTocLatencyCalled {
|
if !deserializeTocLatencyCalled {
|
||||||
allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
|
allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
|
||||||
@ -1573,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
|
||||||
|
return stargzCheckFn(func(t *testing.T, r *Reader) {
|
||||||
|
extraMap := make(map[string]chunkInfo)
|
||||||
|
for _, e := range extra {
|
||||||
|
extraMap[e.name] = e
|
||||||
|
}
|
||||||
|
var extraNames []string
|
||||||
|
for n := range extraMap {
|
||||||
|
extraNames = append(extraNames, n)
|
||||||
|
}
|
||||||
|
f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
|
||||||
|
t.Logf("On %q: got preread of %q", file, e.Name)
|
||||||
|
ex, ok := extraMap[e.Name]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
|
||||||
|
}
|
||||||
|
got, err := io.ReadAll(cr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
|
||||||
|
}
|
||||||
|
if ex.data != string(got) {
|
||||||
|
t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
|
||||||
|
}
|
||||||
|
delete(extraMap, e.Name)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
got := make([]byte, len(want))
|
||||||
|
n, err := f.ReadAt(got, int64(offset))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
|
||||||
|
}
|
||||||
|
if string(got) != want {
|
||||||
|
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
|
||||||
|
}
|
||||||
|
if len(extraMap) != 0 {
|
||||||
|
var exNames []string
|
||||||
|
for _, ex := range extraMap {
|
||||||
|
exNames = append(exNames, ex.name)
|
||||||
|
}
|
||||||
|
t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func hasFileContentsRange(file string, offset int, want string) stargzCheck {
|
func hasFileContentsRange(file string, offset int, want string) stargzCheck {
|
||||||
return stargzCheckFn(func(t *testing.T, r *Reader) {
|
return stargzCheckFn(func(t *testing.T, r *Reader) {
|
||||||
f, err := r.OpenFile(file)
|
f, err := r.OpenFile(file)
|
||||||
@ -1585,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
|
|||||||
t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
|
t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
|
||||||
}
|
}
|
||||||
if string(got) != want {
|
if string(got) != want {
|
||||||
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want)
|
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -1797,6 +2020,13 @@ func mustSameEntry(files ...string) stargzCheck {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func viewContent(c []byte) string {
|
||||||
|
if len(c) < 100 {
|
||||||
|
return string(c)
|
||||||
|
}
|
||||||
|
return string(c[:50]) + "...(omit)..." + string(c[50:100])
|
||||||
|
}
|
||||||
|
|
||||||
func tarOf(s ...tarEntry) []tarEntry { return s }
|
func tarOf(s ...tarEntry) []tarEntry { return s }
|
||||||
|
|
||||||
type tarEntry interface {
|
type tarEntry interface {
|
||||||
@ -2056,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||||
|
|
||||||
|
func randomContents(n int) string {
|
||||||
|
b := make([]rune, n)
|
||||||
|
for i := range b {
|
||||||
|
b[i] = runes[rand.Intn(len(runes))]
|
||||||
|
}
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
func fileModeToTarMode(mode os.FileMode) (int64, error) {
|
func fileModeToTarMode(mode os.FileMode) (int64, error) {
|
||||||
h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
|
h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -2073,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
|
|||||||
func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
|
func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
|
||||||
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
|
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
|
||||||
func (f fileInfoOnlyMode) Sys() interface{} { return nil }
|
func (f fileInfoOnlyMode) Sys() interface{} { return nil }
|
||||||
|
|
||||||
|
func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
|
||||||
|
if len(streams) == 0 {
|
||||||
|
return // nop
|
||||||
|
}
|
||||||
|
|
||||||
|
wants := map[int64]struct{}{}
|
||||||
|
for _, s := range streams {
|
||||||
|
wants[s] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
len0 := len(b)
|
||||||
|
br := bytes.NewReader(b)
|
||||||
|
zr := new(gzip.Reader)
|
||||||
|
t.Logf("got gzip streams:")
|
||||||
|
numStreams := 0
|
||||||
|
for {
|
||||||
|
zoff := len0 - br.Len()
|
||||||
|
if err := zr.Reset(br); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Fatalf("countStreams(gzip), Reset: %v", err)
|
||||||
|
}
|
||||||
|
zr.Multistream(false)
|
||||||
|
n, err := io.Copy(io.Discard, zr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("countStreams(gzip), Copy: %v", err)
|
||||||
|
}
|
||||||
|
var extra string
|
||||||
|
if len(zr.Header.Extra) > 0 {
|
||||||
|
extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
|
||||||
|
}
|
||||||
|
t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
|
||||||
|
delete(wants, int64(zoff))
|
||||||
|
numStreams++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GzipDiffIDOf(t *testing.T, b []byte) string {
|
||||||
|
h := sha256.New()
|
||||||
|
zr, err := gzip.NewReader(bytes.NewReader(b))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("diffIDOf(gzip): %v", err)
|
||||||
|
}
|
||||||
|
defer zr.Close()
|
||||||
|
if _, err := io.Copy(h, zr); err != nil {
|
||||||
|
t.Fatalf("diffIDOf(gzip).Copy: %v", err)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("sha256:%x", h.Sum(nil))
|
||||||
|
}
|
||||||
|
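The new CheckGzipHasStreams helper above walks an eStargz blob one gzip member at a time to verify where each stream starts. A short, self-contained illustration of the same trick outside the test harness (two members written back to back, then counted with Reset plus Multistream(false)):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer
	for _, s := range []string{"first", "second"} {
		zw := gzip.NewWriter(&buf)
		zw.Write([]byte(s))
		zw.Close() // each Close ends one gzip member
	}

	br := bytes.NewReader(buf.Bytes())
	zr := new(gzip.Reader)
	streams := 0
	for {
		if err := zr.Reset(br); err != nil {
			if err == io.EOF {
				break // no more members
			}
			panic(err)
		}
		zr.Multistream(false) // stop at the member boundary instead of concatenating
		io.Copy(io.Discard, zr)
		streams++
	}
	fmt.Println(streams) // 2
}
```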
vendor/github.com/containerd/stargz-snapshotter/estargz/types.go (generated, vendored): 31 changed lines
@@ -149,6 +149,12 @@ type TOCEntry struct {
 	// ChunkSize.
 	Offset int64 `json:"offset,omitempty"`

+	// InnerOffset is an optional field indicates uncompressed offset
+	// of this "reg" or "chunk" payload in a stream starts from Offset.
+	// This field enables to put multiple "reg" or "chunk" payloads
+	// in one chunk with having the same Offset but different InnerOffset.
+	InnerOffset int64 `json:"innerOffset,omitempty"`
+
 	nextOffset int64 // the Offset of the next entry with a non-zero Offset

 	// DevMajor is the major device number for "char" and "block" types.
@@ -186,6 +192,9 @@ type TOCEntry struct {
 	ChunkDigest string `json:"chunkDigest,omitempty"`

 	children map[string]*TOCEntry
+
+	// chunkTopIndex is index of the entry where Offset starts in the blob.
+	chunkTopIndex int
 }

 // ModTime returns the entry's modification time.
@@ -279,7 +288,10 @@ type Compressor interface {
 	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
 	// Everytime a chunk is written, the WriteCloser is closed and Writer is
 	// called again for writing the next chunk.
-	Writer(w io.Writer) (io.WriteCloser, error)
+	//
+	// The returned writer should implement "Flush() error" function that flushes
+	// any pending compressed data to the underlying writer.
+	Writer(w io.Writer) (WriteFlushCloser, error)

 	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
 	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
@@ -303,8 +315,12 @@ type Decompressor interface {
 	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
 	// the top until the TOC JSON).
 	//
-	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
-	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	// If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader
+	// to ParseTOC. We expect that ParseTOC acquire TOC from the external location and return it.
+	//
+	// tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the
+	// footer (blob size - tocOff - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize become the blob size.
 	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)

 	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
@@ -313,5 +329,14 @@ type Decompressor interface {
 	// This function returns tocDgst that represents the digest of TOC that will be used
 	// to verify this blob. This must match to the value returned from
 	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	//
+	// If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob.
+	// Pass nil reader to ParseTOC then we expect that ParseTOC acquire TOC from the external location
+	// and return it.
 	ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
 }
+
+type WriteFlushCloser interface {
+	io.WriteCloser
+	Flush() error
+}
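Compressor.Writer now returns the WriteFlushCloser added at the end of this hunk. For a gzip-based compressor nothing extra is needed, since *gzip.Writer already has Write, Flush and Close with the required signatures; a minimal sketch (the local interface copy exists only to keep the example self-contained):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// Local copy of the interface from the hunk above, declared here so the
// example compiles on its own.
type WriteFlushCloser interface {
	io.WriteCloser
	Flush() error
}

func main() {
	var buf bytes.Buffer
	var w WriteFlushCloser = gzip.NewWriter(&buf) // *gzip.Writer satisfies it as-is
	w.Write([]byte("chunk"))
	w.Flush() // pending compressed data reaches buf without ending the stream
	w.Close()
	fmt.Println(buf.Len() > 0) // true
}
```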
vendor/github.com/containers/buildah/chroot/run_linux.go (generated, vendored): 14 changed lines
@@ -423,7 +423,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
 				file.Close()
 			}
 		}
-		requestFlags := bindFlags
+		requestFlags := uintptr(0)
 		expectedFlags := uintptr(0)
 		for _, option := range m.Options {
 			switch option {
@@ -457,9 +457,19 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
 		case "bind":
 			// Do the bind mount.
 			logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
-			if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
+			if err := unix.Mount(m.Source, target, "", bindFlags|requestFlags, ""); err != nil {
 				return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
 			}
+			if (requestFlags & unix.MS_RDONLY) != 0 {
+				if err = unix.Statfs(target, &fs); err != nil {
+					return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err)
+				}
+				// we need to make sure these flags are maintained in the REMOUNT operation
+				additionalFlags := uintptr(fs.Flags) & (unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV)
+				if err := unix.Mount("", target, "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY|additionalFlags, ""); err != nil {
+					return undoBinds, fmt.Errorf("setting flags on the bind mount %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
+				}
+			}
 			logrus.Debugf("bind mounted %q to %q", m.Source, target)
 		case "tmpfs":
 			// Mount a tmpfs.
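The read-only handling added above exists because MS_RDONLY is ignored on the initial bind mount; it only takes effect on a follow-up remount, and that remount must also carry whatever noexec/nosuid/nodev bits the target already has. A stripped-down sketch of the same two-step pattern (hypothetical /src and /dst paths, requires root on Linux):

```go
//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Step 1: the bind mount itself; MS_RDONLY here would be silently ignored.
	if err := unix.Mount("/src", "/dst", "", unix.MS_BIND, ""); err != nil {
		log.Fatal(err)
	}
	// Step 2: remount read-only, preserving the flags the mount already has.
	var fs unix.Statfs_t
	if err := unix.Statfs("/dst", &fs); err != nil {
		log.Fatal(err)
	}
	keep := uintptr(fs.Flags) & (unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV)
	if err := unix.Mount("", "/dst", "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY|keep, ""); err != nil {
		log.Fatal(err)
	}
}
```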
vendor/github.com/containers/buildah/copier/copier.go (generated, vendored): 27 changed lines
@@ -1571,15 +1571,15 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
 		return nil
 	}
 	makeDirectoryWriteable := func(directory string) error {
-		st, err := os.Lstat(directory)
-		if err != nil {
-			return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err)
-		}
-		mode := st.Mode() & os.ModePerm
 		if _, ok := directoryModes[directory]; !ok {
+			st, err := os.Lstat(directory)
+			if err != nil {
+				return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err)
+			}
+			mode := st.Mode()
 			directoryModes[directory] = mode
 		}
-		if err = os.Chmod(directory, 0o700); err != nil {
+		if err := os.Chmod(directory, 0o700); err != nil {
 			return fmt.Errorf("copier: put: error making directory %q writable: %w", directory, err)
 		}
 		return nil
@@ -1867,16 +1867,21 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
 			// set other bits that might have been reset by chown()
 			if hdr.Typeflag != tar.TypeSymlink {
 				if hdr.Mode&cISUID == cISUID {
-					mode |= syscall.S_ISUID
+					mode |= os.ModeSetuid
 				}
 				if hdr.Mode&cISGID == cISGID {
-					mode |= syscall.S_ISGID
+					mode |= os.ModeSetgid
 				}
 				if hdr.Mode&cISVTX == cISVTX {
-					mode |= syscall.S_ISVTX
+					mode |= os.ModeSticky
 				}
-				if err = syscall.Chmod(path, uint32(mode)); err != nil {
-					return fmt.Errorf("setting additional permissions on %q to 0%o: %w", path, mode, err)
+				if hdr.Typeflag == tar.TypeDir {
+					// if/when we do the final setting of permissions on this
+					// directory, make sure to incorporate these bits, too
+					directoryModes[path] = mode
+				}
+				if err = os.Chmod(path, mode); err != nil {
+					return fmt.Errorf("copier: put: setting additional permissions on %q to 0%o: %w", path, mode, err)
 				}
 			}
 			// set xattrs, including some that might have been reset by chown()
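A quick illustration of why the hunk above swaps the syscall.S_IS* constants for their os.FileMode equivalents: os.Chmod takes an os.FileMode, whose setuid, setgid and sticky bits are distinct high bits rather than the classic 0o4000/0o2000/0o1000 values that syscall.Chmod expects. This self-contained snippet just prints both representations side by side:

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	fmt.Printf("syscall bits: setuid=%#o setgid=%#o sticky=%#o\n",
		syscall.S_ISUID, syscall.S_ISGID, syscall.S_ISVTX)
	fmt.Printf("os.FileMode bits: setuid=%v setgid=%v sticky=%v\n",
		os.ModeSetuid, os.ModeSetgid, os.ModeSticky)
}
```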
vendor/github.com/containers/buildah/define/build.go (generated, vendored): 2 changed lines
@@ -67,6 +67,8 @@ type CommonBuildOptions struct {
 	// NoHosts tells the builder not to create /etc/hosts content when running
 	// containers.
 	NoHosts bool
+	// NoNewPrivileges removes the ability for the container to gain privileges
+	NoNewPrivileges bool
 	// OmitTimestamp forces epoch 0 as created timestamp to allow for
 	// deterministic, content-addressable builds.
 	OmitTimestamp bool
vendor/github.com/containers/buildah/define/mount_unsupported.go (generated, vendored, new file): 17 added lines
@@ -0,0 +1,17 @@
+//go:build darwin || windows
+// +build darwin windows
+
+package define
+
+const (
+	// TypeBind is the type for mounting host dir
+	TypeBind = "bind"
+
+	// TempDir is the default for storing temporary files
+	TempDir = "/var/tmp"
+)
+
+var (
+	// Mount potions for bind
+	BindOptions = []string{""}
+)
vendor/github.com/containers/buildah/imagebuildah/build.go (generated, vendored): 16 changed lines
@@ -696,11 +696,17 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
 			}
 			base := child.Next.Value
 			if base != "scratch" && !nicknames[base] {
-				// TODO: this didn't undergo variable and arg
-				// expansion, so if the AS clause in another
-				// FROM instruction uses argument values,
-				// we might not record the right value here.
-				baseImages = append(baseImages, base)
+				headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
+				userArgs := argsMapToSlice(stage.Builder.Args)
+				// append heading args so if --build-arg key=value is not
+				// specified but default value is set in Containerfile
+				// via `ARG key=value` so default value can be used.
+				userArgs = append(headingArgs, userArgs...)
+				baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
+				if err != nil {
+					return nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
+				}
+				baseImages = append(baseImages, baseWithArg)
 			}
 		}
 	}
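With the change above, a default declared by `ARG key=value` in the Containerfile can now resolve a parameterized FROM even when no --build-arg is supplied, because the heading args are prepended to the user args before expansion. A rough, self-contained sketch of that ordering (argsMapToSlice here is a stand-in for buildah's helper, not its actual implementation):

```go
package main

import "fmt"

// Stand-in for buildah's argsMapToSlice: turn a map of ARG values into
// "key=value" strings.
func argsMapToSlice(m map[string]string) []string {
	s := make([]string, 0, len(m))
	for k, v := range m {
		s = append(s, k+"="+v)
	}
	return s
}

func main() {
	headingArgs := argsMapToSlice(map[string]string{"BASE": "alpine:3.17"}) // ARG BASE=alpine:3.17 in the Containerfile
	userArgs := argsMapToSlice(map[string]string{})                         // no --build-arg given
	userArgs = append(headingArgs, userArgs...)
	fmt.Println(userArgs) // [BASE=alpine:3.17] is now available when expanding FROM ${BASE}
}
```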
vendor/github.com/containers/buildah/imagebuildah/executor.go (generated, vendored): 5 changed lines
@@ -690,7 +690,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 					base = child.Next.Value
 				}
 			}
+			headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
 			userArgs := argsMapToSlice(stage.Builder.Args)
+			// append heading args so if --build-arg key=value is not
+			// specified but default value is set in Containerfile
+			// via `ARG key=value` so default value can be used.
+			userArgs = append(headingArgs, userArgs...)
 			baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
 			if err != nil {
 				return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
vendor/github.com/containers/buildah/install.md (generated, vendored): 4 changed lines
@@ -52,9 +52,9 @@ rpm-ostree install buildah
 Note: [`podman`](https://podman.io) build is available by default.

 ### [Gentoo](https://www.gentoo.org)
-[app-containers/podman](https://packages.gentoo.org/packages/app-containers/podman)
+[app-containers/buildah](https://packages.gentoo.org/packages/app-containers/buildah)
 ```bash
-sudo emerge app-containers/podman
+sudo emerge app-containers/buildah
 ```

 ### [openSUSE](https://www.opensuse.org)
vendor/github.com/containers/buildah/internal/parse/parse.go (generated, vendored): 16 changed lines
@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
|
"github.com/containers/buildah/define"
|
||||||
"github.com/containers/buildah/internal"
|
"github.com/containers/buildah/internal"
|
||||||
internalUtil "github.com/containers/buildah/internal/util"
|
internalUtil "github.com/containers/buildah/internal/util"
|
||||||
"github.com/containers/common/pkg/parse"
|
"github.com/containers/common/pkg/parse"
|
||||||
@ -17,13 +18,12 @@ import (
|
|||||||
"github.com/containers/storage"
|
"github.com/containers/storage"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/pkg/lockfile"
|
"github.com/containers/storage/pkg/lockfile"
|
||||||
|
"github.com/containers/storage/pkg/unshare"
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// TypeBind is the type for mounting host dir
|
|
||||||
TypeBind = "bind"
|
|
||||||
// TypeTmpfs is the type for mounting tmpfs
|
// TypeTmpfs is the type for mounting tmpfs
|
||||||
TypeTmpfs = "tmpfs"
|
TypeTmpfs = "tmpfs"
|
||||||
// TypeCache is the type for mounting a common persistent cache from host
|
// TypeCache is the type for mounting a common persistent cache from host
|
||||||
@ -51,7 +51,7 @@ var (
|
|||||||
// Caller is expected to perform unmount of any mounted images
|
// Caller is expected to perform unmount of any mounted images
|
||||||
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) {
|
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) {
|
||||||
newMount := specs.Mount{
|
newMount := specs.Mount{
|
||||||
Type: TypeBind,
|
Type: define.TypeBind,
|
||||||
}
|
}
|
||||||
|
|
||||||
mountReadability := false
|
mountReadability := false
|
||||||
@ -201,7 +201,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||||||
)
|
)
|
||||||
fromStage := ""
|
fromStage := ""
|
||||||
newMount := specs.Mount{
|
newMount := specs.Mount{
|
||||||
Type: TypeBind,
|
Type: define.TypeBind,
|
||||||
}
|
}
|
||||||
// if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id
|
// if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id
|
||||||
id := ""
|
id := ""
|
||||||
@ -331,8 +331,8 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||||||
// create a common cache directory, which persists on hosts within temp lifecycle
|
// create a common cache directory, which persists on hosts within temp lifecycle
|
||||||
// add subdirectory if specified
|
// add subdirectory if specified
|
||||||
|
|
||||||
// cache parent directory
|
// cache parent directory: creates separate cache parent for each user.
|
||||||
cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir)
|
cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
|
||||||
// create cache on host if not present
|
// create cache on host if not present
|
||||||
err = os.MkdirAll(cacheParent, os.FileMode(0755))
|
err = os.MkdirAll(cacheParent, os.FileMode(0755))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -544,7 +544,7 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
|
|||||||
// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
|
// If this function succeeds, the caller must unlock the returned lockfile.Lockers if any (when??).
|
||||||
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []lockfile.Locker, error) {
|
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []lockfile.Locker, error) {
|
||||||
// If `type` is not set default to "bind"
|
// If `type` is not set default to "bind"
|
||||||
mountType := TypeBind
|
mountType := define.TypeBind
|
||||||
finalMounts := make(map[string]specs.Mount)
|
finalMounts := make(map[string]specs.Mount)
|
||||||
mountedImages := make([]string, 0)
|
mountedImages := make([]string, 0)
|
||||||
targetLocks := make([]lockfile.Locker, 0)
|
targetLocks := make([]lockfile.Locker, 0)
|
||||||
@ -575,7 +575,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
switch mountType {
|
switch mountType {
|
||||||
case TypeBind:
|
case define.TypeBind:
|
||||||
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil)
|
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, mountedImages, nil, err
|
return nil, mountedImages, nil, err
|
||||||
|
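Among the internal/parse changes above, the cache parent used by `RUN --mount=type=cache` now gets the rootless UID appended, so different users on the same host no longer share one cache directory. A toy sketch of the resulting path (the temp dir, cache dir name and UID are placeholders, not buildah's actual values):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

func main() {
	tempDir := "/var/tmp"       // placeholder for internalUtil.GetTempDir()
	cacheDir := "buildah-cache" // placeholder for BuildahCacheDir
	uid := 1000                 // placeholder for unshare.GetRootlessUID()

	cacheParent := filepath.Join(tempDir, cacheDir+"-"+strconv.Itoa(uid))
	fmt.Println(cacheParent) // /var/tmp/buildah-cache-1000
}
```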
vendor/github.com/containers/buildah/pkg/parse/parse.go (generated, vendored): 14 changed lines
@@ -222,13 +222,14 @@ func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, err
 func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
 	for _, opt := range securityOpts {
 		if opt == "no-new-privileges" {
-			return errors.New("no-new-privileges is not supported")
+			commonOpts.NoNewPrivileges = true
+			continue
 		}

 		con := strings.SplitN(opt, "=", 2)
 		if len(con) != 2 {
 			return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
 		}

 		switch con[0] {
 		case "label":
 			commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1])
@@ -928,10 +929,11 @@ func IsolationOption(isolation string) (define.Isolation, error) {

 // Device parses device mapping string to a src, dest & permissions string
 // Valid values for device look like:
+//
 //	'/dev/sdc"
 //	'/dev/sdc:/dev/xvdc"
 //	'/dev/sdc:/dev/xvdc:rwm"
 //	'/dev/sdc:rm"
 func Device(device string) (string, string, string, error) {
 	src := ""
 	dst := ""
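After the parseSecurityOpts change above, `--security-opt no-new-privileges` is accepted and simply flips the new CommonBuildOptions.NoNewPrivileges field instead of returning an error. A condensed, self-contained imitation of that parsing (the option struct and function here are stand-ins, not buildah's):

```go
package main

import (
	"fmt"
	"strings"
)

type buildOptions struct{ NoNewPrivileges bool }

func parseSecurityOpts(securityOpts []string, o *buildOptions) error {
	for _, opt := range securityOpts {
		if opt == "no-new-privileges" {
			o.NoNewPrivileges = true
			continue
		}
		con := strings.SplitN(opt, "=", 2)
		if len(con) != 2 {
			return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
		}
		// label=, apparmor= and seccomp= are handled here in the real code.
	}
	return nil
}

func main() {
	var o buildOptions
	if err := parseSecurityOpts([]string{"no-new-privileges"}, &o); err != nil {
		panic(err)
	}
	fmt.Println(o.NoNewPrivileges) // true
}
```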
vendor/github.com/containers/buildah/run_common.go (generated, vendored): 6 changed lines
@ -1455,8 +1455,8 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
|
|||||||
//
|
//
|
||||||
// If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??)
|
// If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??)
|
||||||
func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
|
func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
|
||||||
// If `type` is not set default to "bind"
|
// If `type` is not set default to TypeBind
|
||||||
mountType := internalParse.TypeBind
|
mountType := define.TypeBind
|
||||||
mountTargets := make([]string, 0, 10)
|
mountTargets := make([]string, 0, 10)
|
||||||
tmpFiles := make([]string, 0, len(mounts))
|
tmpFiles := make([]string, 0, len(mounts))
|
||||||
mountImages := make([]string, 0, 10)
|
mountImages := make([]string, 0, 10)
|
||||||
@ -1510,7 +1510,7 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
|
|||||||
// Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
|
// Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
|
||||||
sshCount++
|
sshCount++
|
||||||
}
|
}
|
||||||
case "bind":
|
case define.TypeBind:
|
||||||
mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps)
|
mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||

vendor/github.com/containers/buildah/run_freebsd.go (generated, vendored): 3 lines changed

@@ -357,8 +357,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 		if len(spliti) > 2 {
 			options = strings.Split(spliti[2], ",")
 		}
-		options = append(options, "bind")
-		mount, err := parseMount("bind", spliti[0], spliti[1], options)
+		mount, err := parseMount("nullfs", spliti[0], spliti[1], options)
 		if err != nil {
 			return nil, err
 		}

vendor/github.com/containers/buildah/run_linux.go (generated, vendored): 2 lines changed

@@ -210,6 +210,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 		return err
 	}
 
+	g.SetProcessNoNewPrivileges(b.CommonBuildOpts.NoNewPrivileges)
+
 	g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)
 
 	// Now grab the spec from the generator.  Set the generator to nil so that future contributors

vendor/github.com/containers/storage/.cirrus.yml (generated, vendored): 2 lines changed

@@ -72,6 +72,8 @@ fedora_testing_task: &fedora_testing
             TEST_DRIVER: "vfs"
         - env:
             TEST_DRIVER: "overlay"
+        - env:
+            TEST_DRIVER: "overlay-transient"
         - env:
             TEST_DRIVER: "fuse-overlay"
         - env:

vendor/github.com/containers/storage/Makefile (generated, vendored): 4 lines changed

@@ -60,7 +60,7 @@ local-gccgo: ## build using gccgo on the host
 	GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
 
 local-cross: ## cross build the binaries for arm, darwin, and freebsd
-	@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
+	@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
 		os=`echo $${target} | cut -f1 -d/` ; \
 		arch=`echo $${target} | cut -f2 -d/` ; \
 		suffix=$${os}.$${arch} ; \
@@ -117,7 +117,7 @@ help: ## this help
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
 
 vendor-in-container:
-	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor
+	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor
 
 vendor:
 	$(GO) mod tidy -compat=1.17

vendor/github.com/containers/storage/containers.go (generated, vendored): 198 lines changed

@@ -15,6 +15,22 @@ import (
 	digest "github.com/opencontainers/go-digest"
 )
 
+type containerLocations uint8
+
+// The backing store is split in two json files, one (the volatile)
+// that is written without fsync() meaning it isn't as robust to
+// unclean shutdown
+const (
+	stableContainerLocation containerLocations = 1 << iota
+	volatileContainerLocation
+
+	numContainerLocationIndex = iota
+)
+
+func containerLocationFromIndex(index int) containerLocations {
+	return 1 << index
+}
+
 // A Container is a reference to a read-write layer with metadata.
 type Container struct {
 	// ID is either one which was specified at create-time, or a random
@@ -64,6 +80,9 @@ type Container struct {
 	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
 
 	Flags map[string]interface{} `json:"flags,omitempty"`
+
+	// volatileStore is true if the container is from the volatile json file
+	volatileStore bool `json:"-"`
 }
 
 // rwContainerStore provides bookkeeping for information about Containers.
@@ -115,11 +134,15 @@ type rwContainerStore interface {
 
 	// Containers returns a slice enumerating the known containers.
 	Containers() ([]Container, error)
+
+	// Clean up unreferenced datadirs
+	GarbageCollect() error
 }
 
 type containerStore struct {
 	lockfile   Locker
 	dir        string
+	jsonPath   [numContainerLocationIndex]string
 	containers []*Container
 	idindex    *truncindex.TruncIndex
 	byid       map[string]*Container
@@ -142,6 +165,7 @@ func copyContainer(c *Container) *Container {
 		UIDMap: copyIDMap(c.UIDMap),
 		GIDMap: copyIDMap(c.GIDMap),
 		Flags:  copyStringInterfaceMap(c.Flags),
+		volatileStore: c.volatileStore,
 	}
 }
 
@@ -176,6 +200,13 @@ func (c *Container) MountOpts() []string {
 	}
 }
 
+func containerLocation(c *Container) containerLocations {
+	if c.volatileStore {
+		return volatileContainerLocation
+	}
+	return stableContainerLocation
+}
+
 // startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
 // If this succeeds, the caller MUST call stopWriting().
 //
@@ -289,8 +320,37 @@ func (r *containerStore) Containers() ([]Container, error) {
 	return containers, nil
 }
 
-func (r *containerStore) containerspath() string {
-	return filepath.Join(r.dir, "containers.json")
+// This looks for datadirs in the store directory that are not referenced
+// by the json file and removes it. These can happen in the case of unclean
+// shutdowns or regular restarts in transient store mode.
+func (r *containerStore) GarbageCollect() error {
+	entries, err := os.ReadDir(r.dir)
+	if err != nil {
+		// Unexpected, don't try any GC
+		return err
+	}
+
+	for _, entry := range entries {
+		id := entry.Name()
+		// Does it look like a datadir directory?
+		if !entry.IsDir() || !nameLooksLikeID(id) {
+			continue
+		}
+
+		// Should the id be there?
+		if r.byid[id] != nil {
+			continue
+		}
+
+		// Otherwise remove datadir
+		moreErr := os.RemoveAll(filepath.Join(r.dir, id))
+		// Propagate first error
+		if moreErr != nil && err == nil {
+			err = moreErr
+		}
+	}
+
+	return err
 }
 
 func (r *containerStore) datadir(id string) string {
@@ -309,31 +369,53 @@ func (r *containerStore) datapath(id, key string) string {
 // If !lockedForWriting and this function fails, the return value indicates whether
 // retrying with lockedForWriting could succeed.
 func (r *containerStore) load(lockedForWriting bool) (bool, error) {
-	rpath := r.containerspath()
-	data, err := os.ReadFile(rpath)
-	if err != nil && !os.IsNotExist(err) {
-		return false, err
-	}
+	var modifiedLocations containerLocations
 
 	containers := []*Container{}
-	if len(data) != 0 {
-		if err := json.Unmarshal(data, &containers); err != nil {
-			return false, fmt.Errorf("loading %q: %w", rpath, err)
+	ids := make(map[string]*Container)
+
+	for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
+		location := containerLocationFromIndex(locationIndex)
+		rpath := r.jsonPath[locationIndex]
+
+		data, err := os.ReadFile(rpath)
+		if err != nil && !os.IsNotExist(err) {
+			return false, err
+		}
+
+		locationContainers := []*Container{}
+		if len(data) != 0 {
+			if err := json.Unmarshal(data, &locationContainers); err != nil {
+				return false, fmt.Errorf("loading %q: %w", rpath, err)
+			}
+		}
+
+		for _, container := range locationContainers {
+			// There should be no duplicated ids between json files, but lets check to be sure
+			if ids[container.ID] != nil {
+				continue // skip invalid duplicated container
+			}
+			// Remember where the container came from
+			if location == volatileContainerLocation {
+				container.volatileStore = true
+			}
+			containers = append(containers, container)
+			ids[container.ID] = container
 		}
 	}
 
 	idlist := make([]string, 0, len(containers))
 	layers := make(map[string]*Container)
-	ids := make(map[string]*Container)
 	names := make(map[string]*Container)
 	var errorToResolveBySaving error // == nil
 	for n, container := range containers {
 		idlist = append(idlist, container.ID)
-		ids[container.ID] = containers[n]
 		layers[container.LayerID] = containers[n]
 		for _, name := range container.Names {
 			if conflict, ok := names[name]; ok {
 				r.removeName(conflict, name)
 				errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
+				modifiedLocations |= containerLocation(container)
 			}
 			names[name] = containers[n]
 		}
@@ -348,34 +430,64 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
 		if !lockedForWriting {
 			return true, errorToResolveBySaving
 		}
-		return false, r.Save()
+		return false, r.save(modifiedLocations)
 	}
 	return false, nil
 }
 
 // Save saves the contents of the store to disk.  It should be called with
 // the lock held, locked for writing.
-func (r *containerStore) Save() error {
+func (r *containerStore) save(saveLocations containerLocations) error {
 	r.lockfile.AssertLockedForWriting()
-	rpath := r.containerspath()
-	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
-		return err
-	}
-	jdata, err := json.Marshal(&r.containers)
-	if err != nil {
-		return err
-	}
-	if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
-		return err
+	for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
+		location := containerLocationFromIndex(locationIndex)
+		if location&saveLocations == 0 {
+			continue
+		}
+		rpath := r.jsonPath[locationIndex]
+		if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+			return err
+		}
+		subsetContainers := make([]*Container, 0, len(r.containers))
+		for _, container := range r.containers {
+			if containerLocation(container) == location {
+				subsetContainers = append(subsetContainers, container)
+			}
+		}
+
+		jdata, err := json.Marshal(&subsetContainers)
+		if err != nil {
+			return err
+		}
+		var opts *ioutils.AtomicFileWriterOptions
+		if location == volatileContainerLocation {
+			opts = &ioutils.AtomicFileWriterOptions{
+				NoSync: true,
+			}
+		}
+		if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
+			return err
+		}
 	}
 	return r.lockfile.Touch()
 }
 
-func newContainerStore(dir string) (rwContainerStore, error) {
+func (r *containerStore) saveFor(modifiedContainer *Container) error {
+	return r.save(containerLocation(modifiedContainer))
+}
+
+func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
 	if err := os.MkdirAll(dir, 0700); err != nil {
 		return nil, err
 	}
-	lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock"))
+	volatileDir := dir
+	if transient {
+		if err := os.MkdirAll(runDir, 0700); err != nil {
+			return nil, err
		}
+		volatileDir = runDir
+	}
+	lockfile, err := GetLockfile(filepath.Join(volatileDir, "containers.lock"))
 	if err != nil {
 		return nil, err
 	}
@@ -386,7 +498,12 @@ func newContainerStore(dir string) (rwContainerStore, error) {
 		byid:       make(map[string]*Container),
 		bylayer:    make(map[string]*Container),
 		byname:     make(map[string]*Container),
+		jsonPath: [numContainerLocationIndex]string{
+			filepath.Join(dir, "containers.json"),
+			filepath.Join(volatileDir, "volatile-containers.json"),
+		},
 	}
 
 	if err := cstore.startWritingWithReload(false); err != nil {
 		return nil, err
 	}
@@ -418,7 +535,7 @@ func (r *containerStore) ClearFlag(id string, flag string) error {
 		return ErrContainerUnknown
 	}
 	delete(container.Flags, flag)
-	return r.Save()
+	return r.saveFor(container)
 }
 
 func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
@@ -430,7 +547,7 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
 		container.Flags = make(map[string]interface{})
 	}
 	container.Flags[flag] = value
-	return r.Save()
+	return r.saveFor(container)
 }
 
 func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
@@ -476,6 +593,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 		Flags:          copyStringInterfaceMap(options.Flags),
 		UIDMap:         copyIDMap(options.UIDMap),
 		GIDMap:         copyIDMap(options.GIDMap),
+		volatileStore:  options.Volatile,
 	}
 	r.containers = append(r.containers, container)
 	r.byid[id] = container
@@ -486,7 +604,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 	for _, name := range names {
 		r.byname[name] = container
 	}
-	err = r.Save()
+	err = r.saveFor(container)
 	container = copyContainer(container)
 	return container, err
 }
@@ -501,7 +619,7 @@ func (r *containerStore) Metadata(id string) (string, error) {
 func (r *containerStore) SetMetadata(id, metadata string) error {
 	if container, ok := r.lookup(id); ok {
 		container.Metadata = metadata
-		return r.Save()
+		return r.saveFor(container)
 	}
 	return ErrContainerUnknown
 }
@@ -530,7 +648,7 @@ func (r *containerStore) updateNames(id string, names []string, op updateNameOpe
 		r.byname[name] = container
 	}
 	container.Names = names
-	return r.Save()
+	return r.saveFor(container)
 }
 
 func (r *containerStore) Delete(id string) error {
@@ -562,7 +680,7 @@ func (r *containerStore) Delete(id string) error {
 			r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
 		}
 	}
-	if err := r.Save(); err != nil {
+	if err := r.saveFor(container); err != nil {
 		return err
 	}
 	if err := os.RemoveAll(r.datadir(id)); err != nil {
@@ -601,6 +719,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
 	return os.ReadFile(r.datapath(c.ID, key))
 }
 
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
 func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	if key == "" {
 		return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
@@ -609,10 +728,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	if !ok {
 		return -1, ErrContainerUnknown
 	}
-	if c.BigDataSizes == nil {
-		c.BigDataSizes = make(map[string]int64)
-	}
-	if size, ok := c.BigDataSizes[key]; ok {
+	if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -631,6 +747,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 	return -1, ErrSizeUnknown
 }
 
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
 func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if key == "" {
 		return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
@@ -639,10 +756,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if !ok {
 		return "", ErrContainerUnknown
 	}
-	if c.BigDataDigests == nil {
-		c.BigDataDigests = make(map[string]digest.Digest)
-	}
-	if d, ok := c.BigDataDigests[key]; ok {
+	if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
 		return d, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -709,7 +823,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 			save = true
 		}
 		if save {
-			err = r.Save()
+			err = r.saveFor(c)
 		}
 	}
 	return err
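
The split between containers.json and volatile-containers.json above is driven by a small bitmask: each record remembers which file it came from, and save() only rewrites the files whose bit is set, skipping fsync for the volatile one. A minimal, self-contained Go sketch of that pattern follows; the names (location, record, save) are illustrative only and are not the vendored API.

package main

import "fmt"

type location uint8

const (
	stable location = 1 << iota
	volatile
	numLocations = iota
)

type record struct {
	id         string
	isVolatile bool
}

// locationOf reports which backing file a record belongs to.
func locationOf(r record) location {
	if r.isVolatile {
		return volatile
	}
	return stable
}

// save rewrites only the backing files whose bit is set in which.
func save(records []record, which location) {
	for i := 0; i < numLocations; i++ {
		loc := location(1 << i)
		if loc&which == 0 {
			continue // this file was not modified, leave it alone
		}
		var subset []record
		for _, r := range records {
			if locationOf(r) == loc {
				subset = append(subset, r)
			}
		}
		fmt.Printf("would rewrite file %d with %d records\n", i, len(subset))
	}
}

func main() {
	recs := []record{{id: "a"}, {id: "b", isVolatile: true}}
	save(recs, locationOf(recs[1])) // only the volatile file is rewritten
}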

vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored): 5 lines changed

@@ -251,6 +251,11 @@ func (a *Driver) Exists(id string) bool {
 	return true
 }
 
+// List layers (not including additional image stores)
+func (a *Driver) ListLayers() ([]string, error) {
+	return nil, graphdriver.ErrNotSupported
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (a *Driver) AdditionalImageStores() []string {
 	return nil

vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored): 5 lines changed

@@ -676,6 +676,11 @@ func (d *Driver) Exists(id string) bool {
 	return err == nil
 }
 
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	return nil, graphdriver.ErrNotSupported
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
 	return nil

vendor/github.com/containers/storage/drivers/devmapper/driver.go (generated, vendored): 5 lines changed

@@ -267,6 +267,11 @@ func (d *Driver) Exists(id string) bool {
 	return d.DeviceSet.HasDevice(id)
 }
 
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	return nil, graphdriver.ErrNotSupported
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
 	return nil

vendor/github.com/containers/storage/drivers/driver.go (generated, vendored): 3 lines changed

@@ -109,6 +109,9 @@ type ProtoDriver interface {
 	// Exists returns whether a filesystem layer with the specified
 	// ID exists on this driver.
 	Exists(id string) bool
+	// Returns a list of layer ids that exist on this driver (does not include
+	// additional storage layers). Not supported by all backends.
+	ListLayers() ([]string, error)
 	// Status returns a set of key-value pairs which give low
 	// level diagnostic status about this driver.
 	Status() [][2]string
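
ListLayers gives the layer store a way to enumerate on-disk layer directories so GarbageCollect can remove unreferenced ones; backends that cannot enumerate return graphdriver.ErrNotSupported, while overlay and vfs scan their home directory for 64-character hex names. A rough standalone sketch of that directory scan, with illustrative names and an example path rather than the vendored code:

package main

import (
	"fmt"
	"os"
	"unicode"
)

// looksLikeLayerID reports whether name is a 64-character hex string,
// the shape of the layer directories the overlay and vfs drivers create.
func looksLikeLayerID(name string) bool {
	if len(name) != 64 {
		return false
	}
	for _, c := range name {
		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
			return false
		}
	}
	return true
}

// listLayers scans home and returns the entries that look like layer IDs.
func listLayers(home string) ([]string, error) {
	entries, err := os.ReadDir(home)
	if err != nil {
		return nil, err
	}
	var layers []string
	for _, entry := range entries {
		if entry.IsDir() && looksLikeLayerID(entry.Name()) {
			layers = append(layers, entry.Name())
		}
	}
	return layers, nil
}

func main() {
	layers, err := listLayers("/var/lib/containers/storage/overlay") // example path
	if err != nil {
		fmt.Println("cannot enumerate layers:", err)
		return
	}
	fmt.Println(layers)
}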

vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored): 39 lines changed

@@ -17,6 +17,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"unicode"
 
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/drivers/overlayutils"
@@ -356,9 +357,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		if opts.forceMask != nil {
 			return nil, errors.New("'force_mask' is supported only with 'mount_program'")
 		}
-		// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+		// check if they are running over btrfs, aufs, overlay, or ecryptfs
 		switch fsMagic {
-		case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+		case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
 			return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
 		}
 		if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
@@ -1697,6 +1698,40 @@ func (d *Driver) Exists(id string) bool {
 	return err == nil
 }
 
+func nameLooksLikeID(name string) bool {
+	if len(name) != 64 {
+		return false
+	}
+	for _, c := range name {
+		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
+			return false
+		}
+	}
+	return true
+}
+
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	entries, err := os.ReadDir(d.home)
+	if err != nil {
+		return nil, err
+	}
+
+	layers := make([]string, 0)
+
+	for _, entry := range entries {
+		id := entry.Name()
+		// Does it look like a datadir directory?
+		if !entry.IsDir() || !nameLooksLikeID(id) {
+			continue
+		}
+
+		layers = append(layers, id)
+	}
+
+	return layers, err
+}
+
 // isParent returns if the passed in parent is the direct parent of the passed in layer
 func (d *Driver) isParent(id, parent string) bool {
 	lowers, err := d.getLowerDirs(id)

vendor/github.com/containers/storage/drivers/vfs/driver.go (generated, vendored): 35 lines changed

@@ -8,6 +8,7 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
+	"unicode"
 
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
@@ -265,6 +266,40 @@ func (d *Driver) Exists(id string) bool {
 	return err == nil
 }
 
+func nameLooksLikeID(name string) bool {
+	if len(name) != 64 {
+		return false
+	}
+	for _, c := range name {
+		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
+			return false
+		}
+	}
+	return true
+}
+
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	entries, err := os.ReadDir(d.homes[0])
+	if err != nil {
+		return nil, err
+	}
+
+	layers := make([]string, 0)
+
+	for _, entry := range entries {
+		id := entry.Name()
+		// Does it look like a datadir directory?
+		if !entry.IsDir() || !nameLooksLikeID(id) {
+			continue
+		}
+
+		layers = append(layers, id)
+	}
+
+	return layers, err
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
 	if len(d.homes) > 1 {

vendor/github.com/containers/storage/drivers/windows/windows.go (generated, vendored): 5 lines changed

@@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
 	return result
 }
 
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	return nil, graphdriver.ErrNotSupported
+}
+
 // CreateFromTemplate creates a layer with the same contents and parent as another layer.
 func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
 	return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)

vendor/github.com/containers/storage/drivers/zfs/zfs.go (generated, vendored): 5 lines changed

@@ -506,6 +506,11 @@ func (d *Driver) Exists(id string) bool {
 	return d.filesystemsCache[d.zfsPath(id)]
 }
 
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+	return nil, graphdriver.ErrNotSupported
+}
+
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
 	return nil

vendor/github.com/containers/storage/images.go (generated, vendored): 13 lines changed

@@ -148,6 +148,9 @@ type rwImageStore interface {
 	// Delete removes the record of the image.
 	Delete(id string) error
 
+	addMappedTopLayer(id, layer string) error
+	removeMappedTopLayer(id, layer string) error
+
 	// Wipe removes records of all images.
 	Wipe() error
 }
@@ -763,10 +766,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
 	if !ok {
 		return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
-	if image.BigDataSizes == nil {
-		image.BigDataSizes = make(map[string]int64)
-	}
-	if size, ok := image.BigDataSizes[key]; ok {
+	if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -783,10 +783,7 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if !ok {
 		return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
-	if image.BigDataDigests == nil {
-		image.BigDataDigests = make(map[string]digest.Digest)
-	}
-	if d, ok := image.BigDataDigests[key]; ok {
+	if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
 		return d, nil
 	}
 	return "", ErrDigestUnknown
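
The BigDataSize and BigDataDigest cleanups above lean on a Go guarantee: indexing a nil map is legal and yields the zero value with ok == false, so the explicit nil checks and map allocations in these read paths were redundant. A two-line illustration:

package main

import "fmt"

func main() {
	var sizes map[string]int64 // nil map: reads are fine, writes would panic
	size, ok := sizes["config"]
	fmt.Println(size, ok) // prints: 0 false
}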
|
296
vendor/github.com/containers/storage/layers.go
generated
vendored
296
vendor/github.com/containers/storage/layers.go
generated
vendored
@ -42,6 +42,22 @@ const (
|
|||||||
maxLayerStoreCleanupIterations = 3
|
maxLayerStoreCleanupIterations = 3
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type layerLocations uint8
|
||||||
|
|
||||||
|
// The backing store is split in two json files, one (the volatile)
|
||||||
|
// that is written without fsync() meaning it isn't as robust to
|
||||||
|
// unclean shutdown
|
||||||
|
const (
|
||||||
|
stableLayerLocation layerLocations = 1 << iota
|
||||||
|
volatileLayerLocation
|
||||||
|
|
||||||
|
numLayerLocationIndex = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
func layerLocationFromIndex(index int) layerLocations {
|
||||||
|
return 1 << index
|
||||||
|
}
|
||||||
|
|
||||||
// A Layer is a record of a copy-on-write layer that's stored by the lower
|
// A Layer is a record of a copy-on-write layer that's stored by the lower
|
||||||
// level graph driver.
|
// level graph driver.
|
||||||
type Layer struct {
|
type Layer struct {
|
||||||
@ -123,6 +139,9 @@ type Layer struct {
|
|||||||
// ReadOnly is true if this layer resides in a read-only layer store.
|
// ReadOnly is true if this layer resides in a read-only layer store.
|
||||||
ReadOnly bool `json:"-"`
|
ReadOnly bool `json:"-"`
|
||||||
|
|
||||||
|
// volatileStore is true if the container is from the volatile json file
|
||||||
|
volatileStore bool `json:"-"`
|
||||||
|
|
||||||
// BigDataNames is a list of names of data items that we keep for the
|
// BigDataNames is a list of names of data items that we keep for the
|
||||||
// convenience of the caller. They can be large, and are only in
|
// convenience of the caller. They can be large, and are only in
|
||||||
// memory when being read from or written to disk.
|
// memory when being read from or written to disk.
|
||||||
@ -276,23 +295,34 @@ type rwLayerStore interface {
|
|||||||
// store.
|
// store.
|
||||||
// This API is experimental and can be changed without bumping the major version number.
|
// This API is experimental and can be changed without bumping the major version number.
|
||||||
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
|
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
|
||||||
|
|
||||||
|
// Clean up unreferenced layers
|
||||||
|
GarbageCollect() error
|
||||||
}
|
}
|
||||||
|
|
||||||
type layerStore struct {
|
type layerStore struct {
|
||||||
lockfile Locker
|
lockfile Locker
|
||||||
mountsLockfile Locker
|
mountsLockfile Locker
|
||||||
rundir string
|
rundir string
|
||||||
driver drivers.Driver
|
jsonPath [numLayerLocationIndex]string
|
||||||
layerdir string
|
driver drivers.Driver
|
||||||
layers []*Layer
|
layerdir string
|
||||||
idindex *truncindex.TruncIndex
|
layers []*Layer
|
||||||
byid map[string]*Layer
|
idindex *truncindex.TruncIndex
|
||||||
byname map[string]*Layer
|
byid map[string]*Layer
|
||||||
bymount map[string]*Layer
|
byname map[string]*Layer
|
||||||
bycompressedsum map[digest.Digest][]string
|
bymount map[string]*Layer
|
||||||
byuncompressedsum map[digest.Digest][]string
|
bycompressedsum map[digest.Digest][]string
|
||||||
loadMut sync.Mutex
|
byuncompressedsum map[digest.Digest][]string
|
||||||
layerspathModified time.Time
|
loadMut sync.Mutex
|
||||||
|
layerspathsModified [numLayerLocationIndex]time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func layerLocation(l *Layer) layerLocations {
|
||||||
|
if l.volatileStore {
|
||||||
|
return volatileLayerLocation
|
||||||
|
}
|
||||||
|
return stableLayerLocation
|
||||||
}
|
}
|
||||||
|
|
||||||
func copyLayer(l *Layer) *Layer {
|
func copyLayer(l *Layer) *Layer {
|
||||||
@ -311,6 +341,7 @@ func copyLayer(l *Layer) *Layer {
|
|||||||
UncompressedSize: l.UncompressedSize,
|
UncompressedSize: l.UncompressedSize,
|
||||||
CompressionType: l.CompressionType,
|
CompressionType: l.CompressionType,
|
||||||
ReadOnly: l.ReadOnly,
|
ReadOnly: l.ReadOnly,
|
||||||
|
volatileStore: l.volatileStore,
|
||||||
BigDataNames: copyStringSlice(l.BigDataNames),
|
BigDataNames: copyStringSlice(l.BigDataNames),
|
||||||
Flags: copyStringInterfaceMap(l.Flags),
|
Flags: copyStringInterfaceMap(l.Flags),
|
||||||
UIDMap: copyIDMap(l.UIDMap),
|
UIDMap: copyIDMap(l.UIDMap),
|
||||||
@ -419,7 +450,7 @@ func (r *layerStore) stopReading() {
|
|||||||
// Modified() checks if the most recent writer was a party other than the
|
// Modified() checks if the most recent writer was a party other than the
|
||||||
// last recorded writer. It should only be called with the lock held.
|
// last recorded writer. It should only be called with the lock held.
|
||||||
func (r *layerStore) Modified() (bool, error) {
|
func (r *layerStore) Modified() (bool, error) {
|
||||||
var mmodified, tmodified bool
|
var mmodified bool
|
||||||
lmodified, err := r.lockfile.Modified()
|
lmodified, err := r.lockfile.Modified()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return lmodified, err
|
return lmodified, err
|
||||||
@ -437,17 +468,20 @@ func (r *layerStore) Modified() (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the layers.json file has been modified manually, then we have to
|
// If the layers.json file or container-layers.json has been
|
||||||
// reload the storage in any case.
|
// modified manually, then we have to reload the storage in
|
||||||
info, err := os.Stat(r.layerspath())
|
// any case.
|
||||||
if err != nil && !os.IsNotExist(err) {
|
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
|
||||||
return false, fmt.Errorf("stat layers file: %w", err)
|
info, err := os.Stat(r.jsonPath[locationIndex])
|
||||||
}
|
if err != nil && !os.IsNotExist(err) {
|
||||||
if info != nil {
|
return false, fmt.Errorf("stat layers file: %w", err)
|
||||||
tmodified = info.ModTime() != r.layerspathModified
|
}
|
||||||
|
if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return tmodified, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// reloadIfChanged reloads the contents of the store from disk if it is changed.
|
// reloadIfChanged reloads the contents of the store from disk if it is changed.
|
||||||
@ -481,12 +515,35 @@ func (r *layerStore) Layers() ([]Layer, error) {
|
|||||||
return layers, nil
|
return layers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) mountspath() string {
|
func (r *layerStore) GarbageCollect() error {
|
||||||
return filepath.Join(r.rundir, "mountpoints.json")
|
layers, err := r.driver.ListLayers()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, drivers.ErrNotSupported) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, id := range layers {
|
||||||
|
// Is the id still referenced
|
||||||
|
if r.byid[id] != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove layer and any related data of unreferenced id
|
||||||
|
if err := r.driver.Remove(id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Remove(r.tspath(id))
|
||||||
|
os.RemoveAll(r.datadir(id))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) layerspath() string {
|
func (r *layerStore) mountspath() string {
|
||||||
return filepath.Join(r.layerdir, "layers.json")
|
return filepath.Join(r.rundir, "mountpoints.json")
|
||||||
}
|
}
|
||||||
|
|
||||||
// load reloads the contents of the store from disk.
|
// load reloads the contents of the store from disk.
|
||||||
@ -497,28 +554,49 @@ func (r *layerStore) layerspath() string {
|
|||||||
// If !lockedForWriting and this function fails, the return value indicates whether
|
// If !lockedForWriting and this function fails, the return value indicates whether
|
||||||
// retrying with lockedForWriting could succeed.
|
// retrying with lockedForWriting could succeed.
|
||||||
func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
||||||
rpath := r.layerspath()
|
var modifiedLocations layerLocations
|
||||||
info, err := os.Stat(rpath)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.layerspathModified = info.ModTime()
|
|
||||||
}
|
|
||||||
data, err := os.ReadFile(rpath)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
layers := []*Layer{}
|
layers := []*Layer{}
|
||||||
if len(data) != 0 {
|
ids := make(map[string]*Layer)
|
||||||
if err := json.Unmarshal(data, &layers); err != nil {
|
|
||||||
return false, fmt.Errorf("loading %q: %w", rpath, err)
|
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
|
||||||
|
location := layerLocationFromIndex(locationIndex)
|
||||||
|
rpath := r.jsonPath[locationIndex]
|
||||||
|
info, err := os.Stat(rpath)
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.layerspathsModified[locationIndex] = info.ModTime()
|
||||||
|
}
|
||||||
|
data, err := os.ReadFile(rpath)
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
locationLayers := []*Layer{}
|
||||||
|
if len(data) != 0 {
|
||||||
|
if err := json.Unmarshal(data, &locationLayers); err != nil {
|
||||||
|
return false, fmt.Errorf("loading %q: %w", rpath, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, layer := range locationLayers {
|
||||||
|
// There should be no duplicated ids between json files, but lets check to be sure
|
||||||
|
if ids[layer.ID] != nil {
|
||||||
|
continue // skip invalid duplicated layer
|
||||||
|
}
|
||||||
|
// Remember where the layer came from
|
||||||
|
if location == volatileLayerLocation {
|
||||||
|
layer.volatileStore = true
|
||||||
|
}
|
||||||
|
layers = append(layers, layer)
|
||||||
|
ids[layer.ID] = layer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
idlist := make([]string, 0, len(layers))
|
idlist := make([]string, 0, len(layers))
|
||||||
ids := make(map[string]*Layer)
|
|
||||||
names := make(map[string]*Layer)
|
names := make(map[string]*Layer)
|
||||||
compressedsums := make(map[digest.Digest][]string)
|
compressedsums := make(map[digest.Digest][]string)
|
||||||
uncompressedsums := make(map[digest.Digest][]string)
|
uncompressedsums := make(map[digest.Digest][]string)
|
||||||
@ -527,12 +605,12 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||||||
selinux.ClearLabels()
|
selinux.ClearLabels()
|
||||||
}
|
}
|
||||||
for n, layer := range layers {
|
for n, layer := range layers {
|
||||||
ids[layer.ID] = layers[n]
|
|
||||||
idlist = append(idlist, layer.ID)
|
idlist = append(idlist, layer.ID)
|
||||||
for _, name := range layer.Names {
|
for _, name := range layer.Names {
|
||||||
if conflict, ok := names[name]; ok {
|
if conflict, ok := names[name]; ok {
|
||||||
r.removeName(conflict, name)
|
r.removeName(conflict, name)
|
||||||
errorToResolveBySaving = ErrDuplicateLayerNames
|
errorToResolveBySaving = ErrDuplicateLayerNames
|
||||||
|
modifiedLocations |= layerLocation(conflict)
|
||||||
}
|
}
|
||||||
names[name] = layers[n]
|
names[name] = layers[n]
|
||||||
}
|
}
|
||||||
@ -593,7 +671,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||||||
}
|
}
|
||||||
if layerHasIncompleteFlag(layer) {
|
if layerHasIncompleteFlag(layer) {
|
||||||
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
|
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
|
||||||
err = r.deleteInternal(layer.ID)
|
err := r.deleteInternal(layer.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Don't return the error immediately, because deleteInternal does not saveLayers();
|
// Don't return the error immediately, because deleteInternal does not saveLayers();
|
||||||
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
|
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
|
||||||
@ -601,9 +679,10 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||||||
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
|
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
|
||||||
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
|
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
|
||||||
}
|
}
|
||||||
|
modifiedLocations |= layerLocation(layer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := r.saveLayers(); err != nil {
|
if err := r.saveLayers(modifiedLocations); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if incompleteDeletionErrors != nil {
|
if incompleteDeletionErrors != nil {
|
||||||
@ -652,37 +731,62 @@ func (r *layerStore) loadMounts() error {
|
|||||||
|
|
||||||
// Save saves the contents of the store to disk. It should be called with
|
// Save saves the contents of the store to disk. It should be called with
|
||||||
// the lock held, locked for writing.
|
// the lock held, locked for writing.
|
||||||
func (r *layerStore) Save() error {
|
func (r *layerStore) save(saveLocations layerLocations) error {
|
||||||
r.mountsLockfile.Lock()
|
r.mountsLockfile.Lock()
|
||||||
defer r.mountsLockfile.Unlock()
|
defer r.mountsLockfile.Unlock()
|
||||||
if err := r.saveLayers(); err != nil {
|
if err := r.saveLayers(saveLocations); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return r.saveMounts()
|
return r.saveMounts()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) saveLayers() error {
|
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
|
||||||
|
return r.save(layerLocation(modifiedLayer))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *layerStore) saveLayers(saveLocations layerLocations) error {
|
||||||
if !r.lockfile.IsReadWrite() {
|
if !r.lockfile.IsReadWrite() {
|
||||||
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
|
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
|
||||||
}
|
}
|
||||||
r.lockfile.AssertLockedForWriting()
|
r.lockfile.AssertLockedForWriting()
|
||||||
rpath := r.layerspath()
|
|
||||||
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
|
||||||
return err
|
location := layerLocationFromIndex(locationIndex)
|
||||||
|
if location&saveLocations == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rpath := r.jsonPath[locationIndex]
|
||||||
|
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
subsetLayers := make([]*Layer, 0, len(r.layers))
|
||||||
|
for _, layer := range r.layers {
|
||||||
|
if layerLocation(layer) == location {
|
||||||
|
subsetLayers = append(subsetLayers, layer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
jldata, err := json.Marshal(&subsetLayers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var opts *ioutils.AtomicFileWriterOptions
|
||||||
|
if location == volatileLayerLocation {
|
||||||
|
opts = &ioutils.AtomicFileWriterOptions{
|
||||||
|
NoSync: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, opts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.lockfile.Touch()
|
||||||
}
|
}
|
||||||
jldata, err := json.Marshal(&r.layers)
|
return nil
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return r.lockfile.Touch()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) saveMounts() error {
|
func (r *layerStore) saveMounts() error {
|
||||||
if !r.lockfile.IsReadWrite() {
|
if !r.lockfile.IsReadWrite() {
|
||||||
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
|
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
|
||||||
}
|
}
|
||||||
r.mountsLockfile.AssertLockedForWriting()
|
r.mountsLockfile.AssertLockedForWriting()
|
||||||
mpath := r.mountspath()
|
mpath := r.mountspath()
|
||||||
@ -712,13 +816,18 @@ func (r *layerStore) saveMounts() error {
|
|||||||
return r.loadMounts()
|
return r.loadMounts()
|
||||||
}
|
}
|
||||||
|
|
||||||
-func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (rwLayerStore, error) {
+func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
	if err := os.MkdirAll(rundir, 0700); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(layerdir, 0700); err != nil {
		return nil, err
	}
+	// Note: While the containers.lock file is in rundir for transient stores
+	// we don't want to do this here, because the non-transient layers in
+	// layers.json might be used externally as a read-only layer (using e.g.
+	// additionalimagestores), and that would look for the lockfile in the
+	// same directory
	lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
	if err != nil {
		return nil, err
@@ -727,6 +836,10 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
	if err != nil {
		return nil, err
	}
+	volatileDir := layerdir
+	if transient {
+		volatileDir = rundir
+	}
	rlstore := layerStore{
		lockfile:       lockfile,
		mountsLockfile: mountsLockfile,
@@ -736,6 +849,10 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
		byid:    make(map[string]*Layer),
		bymount: make(map[string]*Layer),
		byname:  make(map[string]*Layer),
+		jsonPath: [numLayerLocationIndex]string{
+			filepath.Join(layerdir, "layers.json"),
+			filepath.Join(volatileDir, "volatile-layers.json"),
+		},
	}
	if err := rlstore.startWritingWithReload(false); err != nil {
		return nil, err
@@ -761,6 +878,10 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
		byid:    make(map[string]*Layer),
		bymount: make(map[string]*Layer),
		byname:  make(map[string]*Layer),
+		jsonPath: [numLayerLocationIndex]string{
+			filepath.Join(layerdir, "layers.json"),
+			filepath.Join(layerdir, "volatile-layers.json"),
+		},
	}
	if err := rlstore.startReadingWithReload(false); err != nil {
		return nil, err
@@ -800,19 +921,19 @@ func (r *layerStore) Size(name string) (int64, error) {

func (r *layerStore) ClearFlag(id string, flag string) error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	delete(layer.Flags, flag)
-	return r.Save()
+	return r.saveFor(layer)
}

func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	layer, ok := r.lookup(id)
	if !ok {
@@ -822,7 +943,7 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
		layer.Flags = make(map[string]interface{})
	}
	layer.Flags[flag] = value
-	return r.Save()
+	return r.saveFor(layer)
}

func (r *layerStore) Status() ([][2]string, error) {
@@ -876,7 +997,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
	if layer.UncompressedDigest != "" {
		r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
	}
-	if err := r.Save(); err != nil {
+	if err := r.saveFor(layer); err != nil {
		if err2 := r.driver.Remove(id); err2 != nil {
			logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2)
		}
@@ -887,7 +1008,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s

func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
	if !r.lockfile.IsReadWrite() {
-		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	if err := os.MkdirAll(r.rundir, 0700); err != nil {
		return nil, -1, err
@@ -975,6 +1096,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
		UIDMap:        copyIDMap(moreOptions.UIDMap),
		GIDMap:        copyIDMap(moreOptions.GIDMap),
		BigDataNames:  []string{},
+		volatileStore: moreOptions.Volatile,
	}
	r.layers = append(r.layers, layer)
	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
@@ -1004,7 +1126,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
		}
	}()

-	err := r.Save()
+	err := r.saveFor(layer)
	if err != nil {
		cleanupFailureContext = "saving incomplete layer metadata"
		return nil, -1, err
@@ -1070,7 +1192,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
		}
	}
	delete(layer.Flags, incompleteFlag)
-	err = r.Save()
+	err = r.saveFor(layer)
	if err != nil {
		cleanupFailureContext = "saving finished layer metadata"
		return nil, -1, err
@@ -1285,7 +1407,7 @@ func (r *layerStore) removeName(layer *Layer, name string) {

func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	layer, ok := r.lookup(id)
	if !ok {
@@ -1306,7 +1428,7 @@ func (r *layerStore) updateNames(id string, names []string, op updateNameOperati
		r.byname[name] = layer
	}
	layer.Names = names
-	return r.Save()
+	return r.saveFor(layer)
}

func (r *layerStore) datadir(id string) string {
@@ -1333,7 +1455,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
		return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
	}
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	layer, ok := r.lookup(id)
	if !ok {
@@ -1370,7 +1492,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
	}
	if addName {
		layer.BigDataNames = append(layer.BigDataNames, key)
-		return r.Save()
+		return r.saveFor(layer)
	}
	return nil
}
@@ -1392,11 +1514,11 @@ func (r *layerStore) Metadata(id string) (string, error) {

func (r *layerStore) SetMetadata(id, metadata string) error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	if layer, ok := r.lookup(id); ok {
		layer.Metadata = metadata
-		return r.Save()
+		return r.saveFor(layer)
	}
	return ErrLayerUnknown
}
@@ -1418,7 +1540,7 @@ func layerHasIncompleteFlag(layer *Layer) bool {

func (r *layerStore) deleteInternal(id string) error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	layer, ok := r.lookup(id)
	if !ok {
@@ -1430,7 +1552,7 @@ func (r *layerStore) deleteInternal(id string) error {
			layer.Flags = make(map[string]interface{})
		}
		layer.Flags[incompleteFlag] = true
-		if err := r.Save(); err != nil {
+		if err := r.saveFor(layer); err != nil {
			return err
		}
	}
@@ -1532,7 +1654,7 @@ func (r *layerStore) Delete(id string) error {
	if err := r.deleteInternal(id); err != nil {
		return err
	}
-	return r.Save()
+	return r.saveFor(layer)
}

func (r *layerStore) Exists(id string) bool {
@@ -1549,7 +1671,7 @@ func (r *layerStore) Get(id string) (*Layer, error) {

func (r *layerStore) Wipe() error {
	if !r.lockfile.IsReadWrite() {
-		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}
	ids := make([]string, 0, len(r.byid))
	for id := range r.byid {
@@ -1814,7 +1936,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error

func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
	if !r.lockfile.IsReadWrite() {
-		return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
+		return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
	}

	layer, ok := r.lookup(to)
@@ -1953,7 +2075,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
		return layer.GIDs[i] < layer.GIDs[j]
	})

-	err = r.Save()
+	err = r.saveFor(layer)

	return size, err
}
@@ -1994,7 +2116,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
	layer.UncompressedDigest = diffOutput.UncompressedDigest
	layer.UncompressedSize = diffOutput.Size
	layer.Metadata = diffOutput.Metadata
-	if err = r.Save(); err != nil {
+	if err = r.saveFor(layer); err != nil {
		return err
	}
	for k, v := range diffOutput.BigData {
@@ -2035,7 +2157,7 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp
	}
	layer.UIDs = output.UIDs
	layer.GIDs = output.GIDs
-	err = r.Save()
+	err = r.saveFor(layer)
	return &output, err
}

vendor/github.com/containers/storage/pkg/archive/changes_other.go | 13 (generated, vendored)
@@ -43,7 +43,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
	root := newRootFileInfo(idMappings)

-	err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
+	sourceStat, err := system.Lstat(sourceDir)
+	if err != nil {
+		return nil, err
+	}
+
+	err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
@@ -86,8 +91,12 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
		if err != nil {
			return err
		}
-		info.stat = s
+
+		if s.Dev() != sourceStat.Dev() {
+			return filepath.SkipDir
+		}
+
+		info.stat = s
		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info
vendor/github.com/containers/storage/pkg/ioutils/fswriters.go | 8 (generated, vendored)
@@ -61,8 +61,8 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
}

// AtomicWriteFile atomically writes data to a file named by filename.
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-	f, err := newAtomicFileWriter(filename, perm, nil)
+func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
+	f, err := newAtomicFileWriter(filename, perm, opts)
	if err != nil {
		return err
	}
@@ -77,6 +77,10 @@ func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
		return err
	}

+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	return AtomicWriteFileWithOpts(filename, data, perm, nil)
+}
+
type atomicFileWriter struct {
	f  *os.File
	fn string
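The new AtomicWriteFileWithOpts entry point is what lets the layer store skip fsync for volatile metadata while keeping the old AtomicWriteFile behavior as a thin wrapper. A minimal sketch of how a caller might use it; the file paths and payload below are only examples, not anything from this change:

package main

import (
	"log"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	data := []byte(`{"example": true}`)

	// Volatile data: skip the fsync for speed; the file does not need to
	// survive a hard reboot.
	opts := &ioutils.AtomicFileWriterOptions{NoSync: true}
	if err := ioutils.AtomicWriteFileWithOpts("/tmp/volatile-example.json", data, 0600, opts); err != nil {
		log.Fatal(err)
	}

	// Passing nil options behaves like the original AtomicWriteFile.
	if err := ioutils.AtomicWriteFile("/tmp/durable-example.json", data, 0600); err != nil {
		log.Fatal(err)
	}
}

Both calls still write through a temporary file and rename, so readers never see a partially written file; NoSync only relaxes the durability guarantee.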
vendor/github.com/containers/storage/pkg/system/stat_freebsd.go | 4 (generated, vendored)
@@ -18,7 +18,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
		uid:  s.Uid,
		gid:  s.Gid,
		rdev: uint64(s.Rdev),
-		mtim: s.Mtimespec}
+		mtim: s.Mtimespec,
+		dev:  s.Dev}
	st.flags = s.Flags
+	st.dev = s.Dev
	return st, nil
}
vendor/github.com/containers/storage/pkg/system/stat_linux.go | 3 (generated, vendored)
@@ -9,7 +9,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
		uid:  s.Uid,
		gid:  s.Gid,
		rdev: uint64(s.Rdev),
-		mtim: s.Mtim}, nil
+		mtim: s.Mtim,
+		dev:  uint64(s.Dev)}, nil
}

// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
vendor/github.com/containers/storage/pkg/system/stat_unix.go | 6 (generated, vendored)
@@ -18,6 +18,7 @@ type StatT struct {
	rdev uint64
	size int64
	mtim syscall.Timespec
+	dev  uint64
	platformStatT
}

@@ -51,6 +52,11 @@ func (s StatT) Mtim() syscall.Timespec {
	return s.mtim
}

+// Dev returns a unique identifier for owning filesystem
+func (s StatT) Dev() uint64 {
+	return s.dev
+}
+
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
vendor/github.com/containers/storage/pkg/system/stat_windows.go | 5 (generated, vendored)
@@ -43,6 +43,11 @@ func (s StatT) GID() uint32 {
	return 0
}

+// Dev returns a unique identifier for owning filesystem
+func (s StatT) Dev() uint64 {
+	return 0
+}
+
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
vendor/github.com/containers/storage/storage.conf
generated
vendored
4
vendor/github.com/containers/storage/storage.conf
generated
vendored
@ -32,6 +32,10 @@ graphroot = "/var/lib/containers/storage"
|
|||||||
#
|
#
|
||||||
# rootless_storage_path = "$HOME/.local/share/containers/storage"
|
# rootless_storage_path = "$HOME/.local/share/containers/storage"
|
||||||
|
|
||||||
|
# Transient store mode makes all container metadata be saved in temporary storage
|
||||||
|
# (i.e. runroot above). This is faster, but doesn't persist across reboots.
|
||||||
|
# transient_store = true
|
||||||
|
|
||||||
[storage.options]
|
[storage.options]
|
||||||
# Storage options to be passed to underlying storage drivers
|
# Storage options to be passed to underlying storage drivers
|
||||||
|
|
||||||
|
vendor/github.com/containers/storage/store.go | 295 (generated, vendored)
@@ -141,6 +141,7 @@ type Store interface {
	// settings that were passed to GetStore() when the object was created.
	RunRoot() string
	GraphRoot() string
+	TransientStore() bool
	GraphDriverName() string
	GraphOptions() []string
	PullOptions() map[string]string
@@ -502,6 +503,11 @@ type Store interface {
	// Releasing AdditionalLayer handler is caller's responsibility.
	// This API is experimental and can be changed without bumping the major version number.
	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+
+	// Tries to clean up remainders of previous containers or layers that are not
+	// references in the json files. These can happen in the case of unclean
+	// shutdowns or regular restarts in transient store mode.
+	GarbageCollect() error
}

// AdditionalLayer reprents a layer that is contained in the additional layer store
@@ -545,6 +551,8 @@ type LayerOptions struct {
	// and reliably known by the caller.
	// Use the default "" if this fields is not applicable or the value is not known.
	UncompressedDigest digest.Digest
+	// True is the layer info can be treated as volatile
+	Volatile bool
}

// ImageOptions is used for passing options to a Store's CreateImage() method.
@@ -594,6 +602,7 @@ type store struct {
	containerStore  rwContainerStore
	digestLockRoot  string
	disableVolatile bool
+	transientStore  bool
}

// GetStore attempts to find an already-created Store object matching the
@@ -701,6 +710,7 @@ func GetStore(options types.StoreOptions) (Store, error) {
		additionalGIDs:  nil,
		usernsLock:      usernsLock,
		disableVolatile: options.DisableVolatile,
+		transientStore:  options.TransientStore,
		pullOptions:     options.PullOptions,
	}
	if err := s.load(); err != nil {
@@ -748,6 +758,10 @@ func (s *store) GraphRoot() string {
	return s.graphRoot
}

+func (s *store) TransientStore() bool {
+	return s.transientStore
+}
+
func (s *store) GraphOptions() []string {
	return s.graphOptions
}
@@ -794,14 +808,16 @@ func (s *store) load() error {
	if err := os.MkdirAll(gcpath, 0700); err != nil {
		return err
	}
-	rcs, err := newContainerStore(gcpath)
-	if err != nil {
-		return err
-	}
	rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
	if err := os.MkdirAll(rcpath, 0700); err != nil {
		return err
	}
+
+	rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
+	if err != nil {
+		return err
+	}
+
	s.containerStore = rcs

	for _, store := range driver.AdditionalImageStores() {
@@ -883,7 +899,7 @@ func (s *store) getLayerStore() (rwLayerStore, error) {
	if err := os.MkdirAll(glpath, 0700); err != nil {
		return nil, err
	}
-	rls, err := s.newLayerStore(rlpath, glpath, driver)
+	rls, err := s.newLayerStore(rlpath, glpath, driver, s.transientStore)
	if err != nil {
		return nil, err
	}
@@ -1282,9 +1298,10 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
// imageTopLayerForMapping does ???
// On entry:
// - ristore must be locked EITHER for reading or writing
+// - primaryImageStore must be locked for writing; it might be identical to ristore.
// - rlstore must be locked for writing
// - lstores must all be locked for reading
-func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, createMappedLayer bool, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
+func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, primaryImageStore rwImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
	layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
		// If the driver supports shifting and the layer has no mappings, we can use it.
		if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
@@ -1303,6 +1320,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, crea
	var layer, parentLayer *Layer
	allStores := append([]roLayerStore{rlstore}, lstores...)
	// Locate the image's top layer and its parent, if it has one.
+	createMappedLayer := ristore == primaryImageStore
	for _, s := range allStores {
		store := s
		// Walk the top layer list.
@@ -1350,44 +1368,41 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, crea
		return layer, nil
	}
	// The top layer's mappings don't match the ones we want, and it's in an image store
-	// that lets us edit image metadata...
-	if istore, ok := ristore.(*imageStore); ok {
-		// ... so create a duplicate of the layer with the desired mappings, and
-		// register it as an alternate top layer in the image.
-		var layerOptions LayerOptions
-		if s.canUseShifting(options.UIDMap, options.GIDMap) {
-			layerOptions = LayerOptions{
-				IDMappingOptions: types.IDMappingOptions{
-					HostUIDMapping: true,
-					HostGIDMapping: true,
-					UIDMap:         nil,
-					GIDMap:         nil,
-				},
-			}
-		} else {
-			layerOptions = LayerOptions{
-				IDMappingOptions: types.IDMappingOptions{
-					HostUIDMapping: options.HostUIDMapping,
-					HostGIDMapping: options.HostGIDMapping,
-					UIDMap:         copyIDMap(options.UIDMap),
-					GIDMap:         copyIDMap(options.GIDMap),
-				},
-			}
-		}
-		layerOptions.TemplateLayer = layer.ID
-		mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
-		if err != nil {
-			return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
-		}
-		if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
-			if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
-				err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
-			}
-			return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
-		}
-		layer = mappedLayer
-	}
-	return layer, nil
+	// that lets us edit image metadata, so create a duplicate of the layer with the desired
+	// mappings, and register it as an alternate top layer in the image.
+	var layerOptions LayerOptions
+	if s.canUseShifting(options.UIDMap, options.GIDMap) {
+		layerOptions = LayerOptions{
+			IDMappingOptions: types.IDMappingOptions{
+				HostUIDMapping: true,
+				HostGIDMapping: true,
+				UIDMap:         nil,
+				GIDMap:         nil,
+			},
+		}
+	} else {
+		layerOptions = LayerOptions{
+			IDMappingOptions: types.IDMappingOptions{
+				HostUIDMapping: options.HostUIDMapping,
+				HostGIDMapping: options.HostGIDMapping,
+				UIDMap:         copyIDMap(options.UIDMap),
+				GIDMap:         copyIDMap(options.GIDMap),
+			},
+		}
+	}
+	layerOptions.TemplateLayer = layer.ID
+	mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
+	}
+	// By construction, createMappedLayer can only be true if ristore == primaryImageStore.
+	if err = primaryImageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
+		if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
+			err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
+		}
+		return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
+	}
+	return mappedLayer, nil
}

func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
@@ -1488,8 +1503,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
	idMappingsOptions := options.IDMappingOptions
	if image != "" {
		if cimage.TopLayer != "" {
-			createMappedLayer := imageHomeStore == istore
-			ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
+			ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, istore, rlstore, lstores, idMappingsOptions)
			if err != nil {
				return nil, err
			}
@@ -1514,25 +1528,28 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
			gidMap = s.gidMap
		}
	}
-	var layerOptions *LayerOptions
+	layerOptions := &LayerOptions{
+		// Normally layers for containers are volatile only if the container is.
+		// But in transient store mode, all container layers are volatile.
+		Volatile: options.Volatile || s.transientStore,
+	}
	if s.canUseShifting(uidMap, gidMap) {
-		layerOptions = &LayerOptions{
-			IDMappingOptions: types.IDMappingOptions{
+		layerOptions.IDMappingOptions =
+			types.IDMappingOptions{
				HostUIDMapping: true,
				HostGIDMapping: true,
				UIDMap:         nil,
				GIDMap:         nil,
-			},
-		}
+			}
	} else {
-		layerOptions = &LayerOptions{
-			IDMappingOptions: types.IDMappingOptions{
+		layerOptions.IDMappingOptions =
+			types.IDMappingOptions{
				HostUIDMapping: idMappingsOptions.HostUIDMapping,
				HostGIDMapping: idMappingsOptions.HostGIDMapping,
				UIDMap:         copyIDMap(uidMap),
				GIDMap:         copyIDMap(gidMap),
-			},
-		}
+			}
	}
	if options.Flags == nil {
		options.Flags = make(map[string]interface{})
@@ -1559,6 +1576,11 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
	}
	layer = clayer.ID

+	// Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile
+	if s.transientStore {
+		options.Volatile = true
+	}
+
	var container *Container
	err = s.writeToContainerStore(func(rcstore rwContainerStore) error {
		options.IDMappingOptions = types.IDMappingOptions{
@@ -1888,63 +1910,59 @@ func (s *store) ContainerSize(id string) (int64, error) {
		return -1, err
	}

-	rcstore, err := s.getContainerStore()
-	if err != nil {
-		return -1, err
-	}
-	if err := rcstore.startReading(); err != nil {
-		return -1, err
-	}
-	defer rcstore.stopReading()
-
-	// Read the container record.
-	container, err := rcstore.Get(id)
-	if err != nil {
-		return -1, err
-	}
-
-	// Read the container's layer's size.
-	var layer *Layer
-	var size int64
-	for _, store := range layerStores {
-		if layer, err = store.Get(container.LayerID); err == nil {
-			size, err = store.DiffSize("", layer.ID)
-			if err != nil {
-				return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
-			}
-			break
-		}
-	}
-	if layer == nil {
-		return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
-	}
-
-	// Count big data items.
-	names, err := rcstore.BigDataNames(id)
-	if err != nil {
-		return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
-	}
-	for _, name := range names {
-		n, err := rcstore.BigDataSize(id, name)
-		if err != nil {
-			return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
-		}
-		size += n
-	}
-
-	// Count the size of our container directory and container run directory.
-	n, err := directory.Size(cdir)
-	if err != nil {
-		return -1, err
-	}
-	size += n
-	n, err = directory.Size(rdir)
-	if err != nil {
-		return -1, err
-	}
-	size += n
-
-	return size, nil
+	var res int64 = -1
+	err = s.writeToContainerStore(func(rcstore rwContainerStore) error { // Yes, rcstore.BigDataSize requires a write lock.
+		// Read the container record.
+		container, err := rcstore.Get(id)
+		if err != nil {
+			return err
+		}
+
+		// Read the container's layer's size.
+		var layer *Layer
+		var size int64
+		for _, store := range layerStores {
+			if layer, err = store.Get(container.LayerID); err == nil {
+				size, err = store.DiffSize("", layer.ID)
+				if err != nil {
+					return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
+				}
+				break
+			}
+		}
+		if layer == nil {
+			return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
+		}
+
+		// Count big data items.
+		names, err := rcstore.BigDataNames(id)
+		if err != nil {
+			return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
+		}
+		for _, name := range names {
+			n, err := rcstore.BigDataSize(id, name)
+			if err != nil {
+				return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
+			}
+			size += n
+		}
+
+		// Count the size of our container directory and container run directory.
+		n, err := directory.Size(cdir)
+		if err != nil {
+			return err
+		}
+		size += n
+		n, err = directory.Size(rdir)
+		if err != nil {
+			return err
+		}
+		size += n
+
+		res = size
+		return nil
+	})
+	return res, err
}

func (s *store) ListContainerBigData(id string) ([]string, error) {
@@ -1962,27 +1980,23 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
}

func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
-	rcstore, err := s.getContainerStore()
-	if err != nil {
-		return -1, err
-	}
-	if err := rcstore.startReading(); err != nil {
-		return -1, err
-	}
-	defer rcstore.stopReading()
-	return rcstore.BigDataSize(id, key)
+	var res int64 = -1
+	err := s.writeToContainerStore(func(store rwContainerStore) error { // Yes, BigDataSize requires a write lock.
+		var err error
+		res, err = store.BigDataSize(id, key)
+		return err
+	})
+	return res, err
}

func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
-	rcstore, err := s.getContainerStore()
-	if err != nil {
-		return "", err
-	}
-	if err := rcstore.startReading(); err != nil {
-		return "", err
-	}
-	defer rcstore.stopReading()
-	return rcstore.BigDataDigest(id, key)
+	var res digest.Digest
+	err := s.writeToContainerStore(func(store rwContainerStore) error { // Yes, BigDataDigest requires a write lock.
+		var err error
+		res, err = store.BigDataDigest(id, key)
+		return err
+	})
+	return res, err
}

func (s *store) ContainerBigData(id, key string) ([]byte, error) {
@@ -2222,12 +2236,6 @@ func (s *store) DeleteLayer(id string) error {
		if image.TopLayer == id {
			return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
		}
-		if stringutils.InSlice(image.MappedTopLayers, id) {
-			// No write access to the image store, fail before the layer is deleted
-			if _, ok := ristore.(*imageStore); !ok {
-				return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
-			}
-		}
	}
	containers, err := rcstore.Containers()
	if err != nil {
@@ -2242,14 +2250,10 @@ func (s *store) DeleteLayer(id string) error {
		return fmt.Errorf("delete layer %v: %w", id, err)
	}

-	// The check here is used to avoid iterating the images if we don't need to.
-	// There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers.
-	if istore, ok := ristore.(*imageStore); ok {
-		for _, image := range images {
-			if stringutils.InSlice(image.MappedTopLayers, id) {
-				if err = istore.removeMappedTopLayer(image.ID, id); err != nil {
-					return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
-				}
+	for _, image := range images {
+		if stringutils.InSlice(image.MappedTopLayers, id) {
+			if err = ristore.removeMappedTopLayer(image.ID, id); err != nil {
+				return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
			}
		}
	}
@@ -3366,3 +3370,20 @@ func (s *store) Free() {
		}
	}
}
+
+// Tries to clean up old unreferenced container leftovers. returns the first error
+// but continues as far as it can
+func (s *store) GarbageCollect() error {
+	firstErr := s.writeToContainerStore(func(rcstore rwContainerStore) error {
+		return rcstore.GarbageCollect()
+	})
+
+	moreErr := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+		return rlstore.GarbageCollect()
+	})
+	if firstErr == nil {
+		firstErr = moreErr
+	}
+
+	return firstErr
+}
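For consumers of the storage library, the new behavior surfaces as the TransientStore option on types.StoreOptions, the TransientStore() accessor on Store, and the GarbageCollect() method added above. A rough sketch of how an application might opt in; the run root and graph root paths are placeholders, not values from this change:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	options := types.StoreOptions{
		RunRoot:        "/run/example/storage",     // placeholder paths
		GraphRoot:      "/var/lib/example/storage",
		TransientStore: true, // keep container metadata only under RunRoot
	}

	store, err := storage.GetStore(options)
	if err != nil {
		fmt.Println("GetStore:", err)
		return
	}

	fmt.Println("transient store:", store.TransientStore())

	// After an unclean shutdown, or after a reboot in transient mode, drop any
	// container or layer leftovers that are no longer referenced.
	if err := store.GarbageCollect(); err != nil {
		fmt.Println("GarbageCollect:", err)
	}
}

The same switch can be flipped globally with transient_store = true in storage.conf, which the types/options.go change below maps onto StoreOptions.TransientStore.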
vendor/github.com/containers/storage/types/options.go | 4 (generated, vendored)
@@ -22,6 +22,7 @@ type TomlConfig struct {
		RunRoot             string            `toml:"runroot,omitempty"`
		GraphRoot           string            `toml:"graphroot,omitempty"`
		RootlessStoragePath string            `toml:"rootless_storage_path,omitempty"`
+		TransientStore      bool              `toml:"transient_store,omitempty"`
		Options             cfg.OptionsConfig `toml:"options,omitempty"`
	} `toml:"storage"`
}
@@ -234,6 +235,8 @@ type StoreOptions struct {
	PullOptions map[string]string `toml:"pull_options"`
	// DisableVolatile doesn't allow volatile mounts when it is set.
	DisableVolatile bool `json:"disable-volatile,omitempty"`
+	// If transient, don't persist containers over boot (stores db in runroot)
+	TransientStore bool `json:"transient_store,omitempty"`
}

// isRootlessDriver returns true if the given storage driver is valid for containers running as non root
@@ -452,6 +455,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
	}

	storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile
+	storeOptions.TransientStore = config.Storage.TransientStore

	storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)

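ReloadConfigurationFile is the piece that carries the new TOML key into StoreOptions. A small sketch of parsing a config file and checking the result; the config file path is only an example:

package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	var opts types.StoreOptions

	// Parse an example storage.conf; a "transient_store = true" line in its
	// [storage] table ends up in opts.TransientStore.
	if err := types.ReloadConfigurationFile("/etc/containers/storage.conf", &opts); err != nil {
		fmt.Println("reload:", err)
		return
	}

	fmt.Println("run root:       ", opts.RunRoot)
	fmt.Println("graph root:     ", opts.GraphRoot)
	fmt.Println("transient store:", opts.TransientStore)
}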
vendor/github.com/containers/storage/utils.go | 13 (generated, vendored)
@@ -2,6 +2,7 @@ package storage

import (
	"fmt"
+	"unicode"

	"github.com/containers/storage/types"
)
@@ -72,3 +73,15 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
	}
	return dedupeNames(result), nil
}
+
+func nameLooksLikeID(name string) bool {
+	if len(name) != 64 {
+		return false
+	}
+	for _, c := range name {
+		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
+			return false
+		}
+	}
+	return true
+}
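nameLooksLikeID is unexported, but the rule it encodes is simple: a value is treated as a possible ID only when it is exactly 64 hexadecimal characters, i.e. a full SHA-256 layer, image, or container ID. A standalone illustration of the same rule; the helper name and sample strings below are made up for this sketch:

package main

import (
	"fmt"
	"unicode"
)

// looksLikeID mirrors the unexported nameLooksLikeID helper shown above.
func looksLikeID(name string) bool {
	if len(name) != 64 {
		return false
	}
	for _, c := range name {
		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(looksLikeID("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) // true (made-up 64-char hex ID)
	fmt.Println(looksLikeID("my-container"))                                                     // false
}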
vendor/golang.org/x/crypto/openpgp/armor/armor.go | 2 (generated, vendored)
@@ -156,7 +156,7 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) {
	n, err = r.b64Reader.Read(p)
	r.currentCRC = crc24(r.currentCRC, p[:n])

-	if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
+	if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask {
		return 0, ArmorCorrupt
	}

vendor/golang.org/x/crypto/ssh/common.go | 15 (generated, vendored)
@@ -10,6 +10,7 @@ import (
	"fmt"
	"io"
	"math"
+	"strings"
	"sync"

	_ "crypto/sha1"
@@ -118,6 +119,20 @@ func algorithmsForKeyFormat(keyFormat string) []string {
	}
}

+// supportedPubKeyAuthAlgos specifies the supported client public key
+// authentication algorithms. Note that this doesn't include certificate types
+// since those use the underlying algorithm. This list is sent to the client if
+// it supports the server-sig-algs extension. Order is irrelevant.
+var supportedPubKeyAuthAlgos = []string{
+	KeyAlgoED25519,
+	KeyAlgoSKED25519, KeyAlgoSKECDSA256,
+	KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+	KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA,
+	KeyAlgoDSA,
+}
+
+var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",")
+
// unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted.
func unexpectedMessageError(expected, got uint8) error {
vendor/golang.org/x/crypto/ssh/handshake.go | 21 (generated, vendored)
@@ -615,7 +615,8 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
		return err
	}

-	if t.sessionID == nil {
+	firstKeyExchange := t.sessionID == nil
+	if firstKeyExchange {
		t.sessionID = result.H
	}
	result.SessionID = t.sessionID
@@ -626,6 +627,24 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
	if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
		return err
	}
+
+	// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
+	// message with the server-sig-algs extension if the client supports it. See
+	// RFC 8308, Sections 2.4 and 3.1.
+	if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
+		extInfo := &extInfoMsg{
+			NumExtensions: 1,
+			Payload:       make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)),
+		}
+		extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs"))
+		extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...)
+		extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList))
+		extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...)
+		if err := t.conn.writePacket(Marshal(extInfo)); err != nil {
+			return err
+		}
+	}
+
	if packet, err := t.conn.readPacket(); err != nil {
		return err
	} else if packet[0] != msgNewKeys {
vendor/golang.org/x/crypto/ssh/server.go | 13 (generated, vendored)
@@ -291,15 +291,6 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
	return perms, err
}

-func isAcceptableAlgo(algo string) bool {
-	switch algo {
-	case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
-		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
-		return true
-	}
-	return false
-}
-
func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
	if addr == nil {
		return errors.New("ssh: no address known for client, but source-address match required")
@@ -514,7 +505,7 @@ userAuthLoop:
				return nil, parseError(msgUserAuthRequest)
			}
			algo := string(algoBytes)
-			if !isAcceptableAlgo(algo) {
+			if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) {
				authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
				break
			}
@@ -572,7 +563,7 @@ userAuthLoop:
				// algorithm name that corresponds to algo with
				// sig.Format. This is usually the same, but
				// for certs, the names differ.
-				if !isAcceptableAlgo(sig.Format) {
+				if !contains(supportedPubKeyAuthAlgos, sig.Format) {
					authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
					break
				}
vendor/modules.txt | 14 (vendored)
@@ -69,14 +69,14 @@ github.com/container-orchestrated-devices/container-device-interface/specs-go
# github.com/containerd/cgroups v1.0.4
## explicit; go 1.17
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.6.9
+# github.com/containerd/containerd v1.6.10
## explicit; go 1.17
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/sys
-# github.com/containerd/stargz-snapshotter/estargz v0.12.1
+# github.com/containerd/stargz-snapshotter/estargz v0.13.0
## explicit; go 1.16
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
@@ -95,7 +95,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit; go 1.17
github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6
+# github.com/containers/buildah v1.28.1-0.20221122135051-c9f30d81ae37
## explicit; go 1.17
github.com/containers/buildah
github.com/containers/buildah/bind
@@ -118,7 +118,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1
+# github.com/containers/common v0.50.2-0.20221121202831-385be9a25125
## explicit; go 1.17
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@@ -172,7 +172,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
+# github.com/containers/image/v5 v5.23.1-0.20221121174826-d8eb9dd60533
## explicit; go 1.17
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -264,7 +264,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
+# github.com/containers/storage v1.44.1-0.20221121144727-71fd3e87df7a
## explicit; go 1.17
github.com/containers/storage
github.com/containers/storage/drivers
@@ -775,7 +775,7 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.2.0
+# golang.org/x/crypto v0.3.0
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5