Merge pull request #19265 from rhatdan/tmpfs

Clean up /var/tmp/ when using oci-archives to create containers
Authored by Daniel J Walsh on 2023-07-24 12:15:59 -04:00; committed by GitHub.
58 changed files with 1104 additions and 840 deletions

View File

@ -6,3 +6,6 @@ x /tmp/run-*/libpod
D! /var/lib/containers/storage/tmp 0700 root root
D! /run/podman 0700 root root
D! /var/lib/cni/networks
# Remove podman's temporary /var/tmp/container_images* directories, which are
# created when pulling or saving images, on each boot.
R! /var/tmp/container_images*
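
Context for this rule: in tmpfiles.d syntax, R means "remove recursively" and the trailing ! restricts the action to boot time, so stale pull/save scratch directories are swept away on reboot rather than continuously. A minimal Go sketch of the directory naming this rule targets, assuming the scratch space is created with os.MkdirTemp and the container_images prefix (only the prefix is confirmed by the rule itself):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical sketch: create a scratch directory matching the
	// /var/tmp/container_images* pattern that the R! rule removes at boot.
	dir, err := os.MkdirTemp("/var/tmp", "container_images")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Best-effort cleanup on exit; the tmpfiles.d rule is the backstop for
	// processes that die before reaching this point.
	defer os.RemoveAll(dir)
	fmt.Println("scratch dir:", dir)
}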

go.mod (20 changed lines)
View File

@ -12,14 +12,14 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.6.0
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.3.0
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6
github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.26.1
github.com/containers/image/v5 v5.26.1-0.20230721194716-30c87d4a5b8d
github.com/containers/libhvee v0.4.0
github.com/containers/ocicrypt v1.1.7
github.com/containers/psgo v1.8.0
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86
github.com/containers/storage v1.48.1-0.20230721123825-4a3a3019d765
github.com/coreos/go-systemd/v22 v22.5.0
github.com/coreos/stream-metadata-go v0.4.3
github.com/crc-org/vfkit v0.1.1
@ -47,7 +47,7 @@ require (
github.com/onsi/gomega v1.27.9
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc4
github.com/opencontainers/runc v1.1.7
github.com/opencontainers/runc v1.1.8
github.com/opencontainers/runtime-spec v1.1.0-rc.3
github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69
github.com/opencontainers/selinux v1.11.0
@ -94,7 +94,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fsouza/go-dockerclient v1.9.7 // indirect
@ -163,7 +163,7 @@ require (
github.com/rivo/uniseg v0.4.4 // indirect
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.3.2 // indirect
github.com/sigstore/fulcio v1.4.0 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
@ -185,11 +185,11 @@ require (
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.9.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.56.1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/grpc v1.56.2 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect

go.sum (36 changed lines)
View File

@ -245,14 +245,14 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874 h1:w6tQ7mQ9aQ5hguCo8wECUiIBbLFfSClmSpjyPiU7N9E=
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874/go.mod h1:KOi66P8FQvReqloGYaLBVPeRDm/3yx9d2/5zvR7Je+8=
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901 h1:S1tzzO9XFnnmjyKEOdj1Ey4dzRn9Y7LyZC19Vyv7bxw=
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901/go.mod h1:fJVsIJZze4e7pFO2IYRKfRCF9qlnenEPDRmi8IQTINQ=
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6 h1:K/S8SFQsnnNTF0Ws58SrBD9L0EuClzAG8Zp08d7+6AA=
github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6/go.mod h1:0sptTFBBtSznLqoTh80DfvMOCNbdRsNRgVOKhBhrupA=
github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2 h1:4B42HUIAghFGSqej5RADTNf0WlOBFiGGzmGjNa3Do78=
github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2/go.mod h1:O/JSRY1dLfwgBxVvn3yJfKvF63KEjbNJcJAtjpNvO90=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g=
github.com/containers/image/v5 v5.26.1/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA=
github.com/containers/image/v5 v5.26.1-0.20230721194716-30c87d4a5b8d h1:g6DFcXXEMd1OwSVtbrUolGzmkMNyQDyc4OKHOFxbNeE=
github.com/containers/image/v5 v5.26.1-0.20230721194716-30c87d4a5b8d/go.mod h1:dq4a9AttQovSzgEgbRoz+BRcfRFLaz16zrMAlf5DoCY=
github.com/containers/libhvee v0.4.0 h1:HGHIIExgP2PjwjHKKoQM3B+3qakNIZcmmkiAO4luAZE=
github.com/containers/libhvee v0.4.0/go.mod h1:fyWDxNQccveTdE3Oe+QRuLbwF/iyV0hDxXqRX5Svlic=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@ -265,8 +265,8 @@ github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8Ns
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86 h1:MZM2+8TDTvuN7erRxEoTBfxFpkfEd8fUOzygK9CNeVU=
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86/go.mod h1:URATDSDqWwSRt7YsTNwYFu7zrrpJ8fKrmWTB1/R8apw=
github.com/containers/storage v1.48.1-0.20230721123825-4a3a3019d765 h1:FbYVGbhgK50Iia3IzNSovLLUDchISgyFzUcLPmM4RqQ=
github.com/containers/storage v1.48.1-0.20230721123825-4a3a3019d765/go.mod h1:jvPVKMiKlFEdZicActQu3ZqRbObSGIvFwXQ7yFM9zF8=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@ -330,8 +330,8 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r
github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI=
github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
@ -912,8 +912,8 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.3.2 h1:92ubG3JPgpjeKKFWROoWj095SvVKBdbolq22t95qBUg=
github.com/sigstore/fulcio v1.3.2/go.mod h1:RCktAN81mgf6fz7ydiT6X2mSjhMGxaRXfCCa7srztTo=
github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8=
github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
@ -1194,8 +1194,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1420,8 +1420,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@ -1440,8 +1440,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -31,9 +31,6 @@ jobs:
update_release: false
dist_git_branches:
- fedora-all
actions:
pre-sync:
- "bash rpm/update-spec-provides.sh"
- job: koji_build
trigger: commit

View File

@ -11,6 +11,7 @@ import (
filtersPkg "github.com/containers/common/pkg/filters"
"github.com/containers/common/pkg/timetype"
"github.com/containers/image/v5/docker/reference"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -147,7 +148,11 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
filter = filterID(value)
case "digest":
filter = filterDigest(value)
f, err := filterDigest(value)
if err != nil {
return nil, err
}
filter = f
case "intermediate":
intermediate, err := r.bool(duplicate, key, value)
@ -395,12 +400,14 @@ func filterID(value string) filterFunc {
}
// filterDigest creates a digest filter for matching the specified value.
func filterDigest(value string) filterFunc {
// TODO: return an error if value is not a digest
// if _, err := digest.Parse(value); err != nil {...}
return func(img *Image) (bool, error) {
return img.hasDigest(value), nil
func filterDigest(value string) (filterFunc, error) {
d, err := digest.Parse(value)
if err != nil {
return nil, fmt.Errorf("invalid value %q for digest filter: %w", value, err)
}
return func(img *Image) (bool, error) {
return img.hasDigest(d), nil
}, nil
}
// filterIntermediate creates an intermediate filter for images. An image is
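
As an illustration of the new fail-fast behavior, digest.Parse from github.com/opencontainers/go-digest accepts only a well-formed algorithm:hex pair, so a malformed digest filter value now surfaces an error up front instead of silently matching nothing. A small standalone sketch (the digest values are examples):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A well-formed digest parses cleanly and can be compared as a typed value.
	d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(d, err) // prints the digest and <nil>

	// Truncated hex is rejected, which is what makes the filter fail fast.
	_, err = digest.Parse("sha256:abc")
	fmt.Println(err) // a non-nil parse error (exact text depends on the library version)
}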

View File

@ -159,10 +159,9 @@ func (i *Image) Digests() []digest.Digest {
// hasDigest returns whether the specified value matches any digest of the
// image.
func (i *Image) hasDigest(value string) bool {
// TODO: change the argument to a typed digest.Digest
func (i *Image) hasDigest(wantedDigest digest.Digest) bool {
for _, d := range i.Digests() {
if string(d) == value {
if d == wantedDigest {
return true
}
}
@ -686,24 +685,22 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) {
return repoTags, nil
}
// inRepoTags looks for the specified name/tag in the image's repo tags. If
// `ignoreTag` is set, only the repo must match and the tag is ignored.
func (i *Image) inRepoTags(namedTagged reference.NamedTagged, ignoreTag bool) (reference.Named, error) {
// referenceFuzzilyMatchingRepoAndTag checks if the image's repo (and tag if requiredTag != "") matches a fuzzy short input,
// and if so, returns the matching reference.
//
// DO NOT ADD ANY NEW USERS OF THIS SEMANTICS. Rely on existing libimage calls like LookupImage instead,
// and handle unqualified the way it does (c/image/pkg/shortnames).
func (i *Image) referenceFuzzilyMatchingRepoAndTag(requiredRepo reference.Named, requiredTag string) (reference.Named, error) {
repoTags, err := i.NamedRepoTags()
if err != nil {
return nil, err
}
name := namedTagged.Name()
tag := namedTagged.Tag()
name := requiredRepo.Name()
for _, r := range repoTags {
if !ignoreTag {
var repoTag string
if requiredTag != "" {
tagged, isTagged := r.(reference.NamedTagged)
if isTagged {
repoTag = tagged.Tag()
}
if !isTagged || tag != repoTag {
if !isTagged || tagged.Tag() != requiredTag {
continue
}
}

View File

@ -454,28 +454,20 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi
if possiblyUnqualifiedNamedReference == nil {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// In case of a digested reference, we strip off the digest and require
// any image matching the repo/tag to also match the specified digest.
var requiredDigest digest.Digest
digested, isDigested := possiblyUnqualifiedNamedReference.(reference.Digested)
if isDigested {
requiredDigest = digested.Digest()
possiblyUnqualifiedNamedReference = reference.TrimNamed(possiblyUnqualifiedNamedReference)
name = possiblyUnqualifiedNamedReference.String()
}
if !shortnames.IsShortName(name) {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// Docker compat: make sure to add the "latest" tag if needed. The tag
// will be ignored if we're looking for a digest match.
possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference)
namedTagged, isNamedTagged := possiblyUnqualifiedNamedReference.(reference.NamedTagged)
if !isNamedTagged {
// NOTE: this should never happen since we already stripped off
// the digest.
var requiredDigest digest.Digest // or ""
var requiredTag string // or ""
possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference) // Docker compat: make sure to add the "latest" tag if needed.
if digested, ok := possiblyUnqualifiedNamedReference.(reference.Digested); ok {
requiredDigest = digested.Digest()
name = reference.TrimNamed(possiblyUnqualifiedNamedReference).String()
} else if namedTagged, ok := possiblyUnqualifiedNamedReference.(reference.NamedTagged); ok {
requiredTag = namedTagged.Tag()
} else { // This should never happen after the reference.TagNameOnly above.
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown)
}
@ -485,7 +477,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi
}
for _, image := range allImages {
named, err := image.inRepoTags(namedTagged, isDigested)
named, err := image.referenceFuzzilyMatchingRepoAndTag(possiblyUnqualifiedNamedReference, requiredTag)
if err != nil {
return nil, "", err
}
@ -497,8 +489,8 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi
return nil, "", err
}
if img != nil {
if isDigested {
if !img.hasDigest(requiredDigest.String()) {
if requiredDigest != "" {
if !img.hasDigest(requiredDigest) {
continue
}
named = reference.TrimNamed(named)
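
The rewritten branch leans on two helpers from the reference package: TagNameOnly appends :latest only when the input carries neither tag nor digest, and TrimNamed strips both. A hedged standalone sketch of that behavior (the image names are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// An unqualified, untagged name gets ":latest" appended by TagNameOnly.
	named, err := reference.ParseNormalizedNamed("alpine")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.TagNameOnly(named)) // docker.io/library/alpine:latest

	// A digested name is already qualified: TagNameOnly leaves it alone,
	// and TrimNamed strips the digest, matching the branch above.
	digested, err := reference.ParseNormalizedNamed("alpine@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.TagNameOnly(digested)) // unchanged, still digested
	fmt.Println(reference.TrimNamed(digested))   // docker.io/library/alpine
}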

View File

@ -14,6 +14,7 @@ import (
const (
HostContainersInternal = "host.containers.internal"
HostGateway = "host-gateway"
localhost = "localhost"
)
@ -98,7 +99,7 @@ func Remove(file string, entries HostEntries) error {
// new see comment on New()
func newHost(params *Params) error {
entries, err := parseExtraHosts(params.ExtraHosts)
entries, err := parseExtraHosts(params.ExtraHosts, params.HostContainersInternalIP)
if err != nil {
return err
}
@ -230,7 +231,7 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool {
// parseExtraHosts converts a slice of "name:ip" strings to entries.
// Because podman and buildah both store the extra hosts in this format,
// we convert it here instead of having to do this on the caller side.
func parseExtraHosts(extraHosts []string) (HostEntries, error) {
func parseExtraHosts(extraHosts []string, hostContainersInternalIP string) (HostEntries, error) {
entries := make(HostEntries, 0, len(extraHosts))
for _, entry := range extraHosts {
values := strings.SplitN(entry, ":", 2)
@ -243,7 +244,14 @@ func parseExtraHosts(extraHosts []string) (HostEntries, error) {
if values[1] == "" {
return nil, fmt.Errorf("IP address in host entry %q is empty", entry)
}
e := HostEntry{IP: values[1], Names: []string{values[0]}}
ip := values[1]
if values[1] == HostGateway {
if hostContainersInternalIP == "" {
return nil, fmt.Errorf("unable to replace %q of host entry %q: host containers internal IP address is empty", HostGateway, entry)
}
ip = hostContainersInternalIP
}
e := HostEntry{IP: ip, Names: []string{values[0]}}
entries = append(entries, e)
}
return entries, nil
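
Net effect: an --add-host entry of the form name:host-gateway is rewritten to the host.containers.internal address before it is written to the hosts file. A minimal sketch mirroring the substitution above as a standalone function (the IP value is made up for illustration):

package main

import (
	"fmt"
	"strings"
)

const hostGateway = "host-gateway" // mirrors the HostGateway constant above

// resolveExtraHost mirrors the substitution in parseExtraHosts for a single
// "name:ip" entry; it is a standalone illustration, not the library function.
func resolveExtraHost(entry, hostContainersInternalIP string) (string, error) {
	values := strings.SplitN(entry, ":", 2)
	if len(values) != 2 || values[0] == "" || values[1] == "" {
		return "", fmt.Errorf("invalid host entry %q", entry)
	}
	ip := values[1]
	if ip == hostGateway {
		if hostContainersInternalIP == "" {
			return "", fmt.Errorf("unable to replace %q of host entry %q: host containers internal IP address is empty", hostGateway, entry)
		}
		ip = hostContainersInternalIP
	}
	return ip + "\t" + values[0], nil
}

func main() {
	line, err := resolveExtraHost("gw:host-gateway", "192.168.0.1") // illustrative IP
	if err != nil {
		panic(err)
	}
	fmt.Println(line) // 192.168.0.1	gw
}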

View File

@ -51,7 +51,7 @@ const (
BoltDBStateStore RuntimeStateStore = iota
)
var validImageVolumeModes = []string{"bind", "tmpfs", "ignore"}
var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"}
// ProxyEnv is a list of Proxy Environment variables
var ProxyEnv = []string{
@ -513,6 +513,11 @@ type EngineConfig struct {
// CompressionLevel is the compression level used to compress image layers.
CompressionLevel *int `toml:"compression_level,omitempty"`
// PodmanshTimeout is the number of seconds to wait for podmansh logins.
// In other words, the timeout for the `podmansh` container to be in running
// state.
PodmanshTimeout uint `toml:"podmansh_timeout,omitempty,omitzero"`
}
// SetOptions contains a subset of options in a Config. It's used to indicate if

View File

@ -14,6 +14,9 @@ const (
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
// Mount type for mounting host dir
_typeBind = "bind"
)
// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations.

View File

@ -14,6 +14,9 @@ const (
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json"
// Mount type for mounting host dir
_typeBind = "nullfs"
)
// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations.

View File

@ -17,6 +17,9 @@ const (
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
// Mount type for mounting host dir
_typeBind = "bind"
)
func selinuxEnabled() bool {

View File

@ -12,6 +12,9 @@ const (
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
// Mount type for mounting host dir
_typeBind = "bind"
)
// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.

View File

@ -669,6 +669,9 @@ default_sysctls = [
# A value of 0 is treated as no timeout.
#volume_plugin_timeout = 5
# Default timeout in seconds for podmansh logins.
#podmansh_timeout = 30
# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc)
[engine.runtimes]
#crun = [

View File

@ -28,7 +28,7 @@ const (
_defaultTransport = "docker://"
// _defaultImageVolumeMode is a mode to handle built-in image volumes.
_defaultImageVolumeMode = "bind"
_defaultImageVolumeMode = _typeBind
)
var (
@ -298,6 +298,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.CgroupManager = defaultCgroupManager()
c.ServiceTimeout = uint(5)
c.StopTimeout = uint(10)
c.PodmanshTimeout = uint(30)
c.ExitCommandDelay = uint(5 * 60)
c.Remote = isRemote()
c.OCIRuntimes = map[string][]string{
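
Callers can read the new default back through the usual config entry point. A hedged sketch, assuming github.com/containers/common/pkg/config at the version vendored here:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// PodmanshTimeout defaults to 30 unless podmansh_timeout is set in the
	// [engine] section of containers.conf (see the documented default above).
	fmt.Println(cfg.Engine.PodmanshTimeout) // 30
}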

View File

@ -83,12 +83,12 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
return types.BlobInfo{}, err
}
// === Report progress using the ic.c.progress channel, if required.
if ic.c.progress != nil && ic.c.progressInterval > 0 {
// === Report progress using the ic.c.options.Progress channel, if required.
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
progressReader := newProgressReader(
stream.reader,
ic.c.progress,
ic.c.progressInterval,
ic.c.options.Progress,
ic.c.options.ProgressInterval,
srcInfo,
)
defer progressReader.reportDone()
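
For reference, ic.c.options.Progress and ProgressInterval are the public copy.Options fields; the caller owns the channel and its consumer. A hedged sketch of the wiring (the printing goroutine is illustrative, not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/types"
)

func main() {
	// The caller owns the channel; copy reports at most one update per
	// ProgressInterval per blob.
	progress := make(chan types.ProgressProperties)
	go func() {
		for p := range progress {
			fmt.Printf("%s: %d/%d bytes\n", p.Artifact.Digest, p.Offset, p.Artifact.Size)
		}
	}()
	opts := &copy.Options{
		Progress:         progress,
		ProgressInterval: 500 * time.Millisecond,
	}
	_ = opts // pass to copy.Image(ctx, policyContext, destRef, srcRef, opts)
}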

View File

@ -132,30 +132,25 @@ type Options struct {
// data shared across one or more images in a possible manifest list.
// The owner must call close() when done.
type copier struct {
dest private.ImageDestination
rawSource private.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
policyContext *signature.PolicyContext
dest private.ImageDestination
rawSource private.ImageSource
options *Options // never nil
reportWriter io.Writer
progressOutput io.Writer
unparsedToplevel *image.UnparsedImage // for rawSource
blobInfoCache internalblobinfocache.BlobInfoCache2
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
downloadForeignLayers bool
signers []*signer.Signer // Signers to use to create new signatures for the image
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
signers []*signer.Signer // Signers to use to create new signatures for the image
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
// NOTE this function uses an output parameter for the error return value.
// Setting this and returning is the ideal way to return an error.
//
// the defers in this routine will wrap the error return with its own errors
// which can be valuable context in the middle of a multi-streamed copy.
if options == nil {
options = &Options{}
}
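
The nil guard above means copy.Image may be called with options == nil and still pick up defaults. A minimal, hedged sketch of such a call, assuming an accept-anything policy and illustrative source/destination references:

package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func mustParse(name string) types.ImageReference {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		panic(err)
	}
	return ref
}

func main() {
	// An accept-anything policy keeps the sketch short; real callers should
	// load the system policy instead.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()

	srcRef := mustParse("docker://docker.io/library/alpine:latest")
	destRef := mustParse("oci:/tmp/alpine:latest") // illustrative destination

	// nil options is valid thanks to the guard above; defaults are filled in.
	if _, err := copy.Image(context.Background(), policyContext, destRef, srcRef, nil); err != nil {
		panic(err)
	}
}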
@ -209,27 +204,27 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
c := &copier{
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressOutput: progressOutput,
progressInterval: options.ProgressInterval,
progress: options.Progress,
policyContext: policyContext,
dest: dest,
rawSource: rawSource,
options: options,
reportWriter: reportWriter,
progressOutput: progressOutput,
unparsedToplevel: image.UnparsedInstance(rawSource, nil),
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
ociDecryptConfig: options.OciDecryptConfig,
ociEncryptConfig: options.OciEncryptConfig,
downloadForeignLayers: options.DownloadForeignLayers,
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
}
defer c.close()
// Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore
c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore
if c.concurrentBlobCopiesSemaphore == nil {
max := options.MaxParallelDownloads
max := c.options.MaxParallelDownloads
if max == 0 {
max = maxParallelDownloads
}
@ -237,33 +232,34 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
} else {
c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
if options.ConcurrentBlobCopiesSemaphore != nil {
if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
if c.options.ConcurrentBlobCopiesSemaphore != nil {
if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
}
defer options.ConcurrentBlobCopiesSemaphore.Release(1)
defer c.options.ConcurrentBlobCopiesSemaphore.Release(1)
}
}
if err := c.setupSigners(options); err != nil {
if err := c.setupSigners(); err != nil {
return nil, err
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
multiImage, err := isMultiImage(ctx, unparsedToplevel)
multiImage, err := isMultiImage(ctx, c.unparsedToplevel)
if err != nil {
return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
}
if !multiImage {
// The simple case: just copy a single image.
if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
if err != nil {
return nil, err
}
} else if options.ImageListSelection == CopySystemImage {
copiedManifest = single.manifest
} else if c.options.ImageListSelection == CopySystemImage {
// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
// matches the current system to copy, and copy it.
mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
@ -271,34 +267,35 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if err != nil {
return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
instanceDigest, err := manifestList.ChooseInstanceByCompression(options.SourceCtx, options.PreferGzipInstances) // try to pick one that matches options.SourceCtx
instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx
if err != nil {
return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
if err != nil {
return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}
} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
copiedManifest = single.manifest
} else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */
// If we were asked to copy multiple images and can't, that's an error.
if !supportsMultipleImages(c.dest) {
return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
}
// Copy some or all of the images.
switch options.ImageListSelection {
switch c.options.ImageListSelection {
case CopyAllImages:
logrus.Debugf("Source is a manifest list; copying all instances")
case CopySpecificImages:
logrus.Debugf("Source is a manifest list; copying some instances")
}
if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
if copiedManifest, err = c.copyMultipleImages(ctx); err != nil {
return nil, err
}
}
if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil {
return nil, fmt.Errorf("committing the finished image: %w", err)
}

View File

@ -34,7 +34,7 @@ type bpDecryptionStepData struct {
// srcInfo is only used for error messages.
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil {
if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
return &bpDecryptionStepData{
decrypting: false,
}, nil
@ -47,7 +47,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false)
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
@ -81,7 +81,7 @@ type bpEncryptionStepData struct {
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil {
if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
return &bpEncryptionStepData{
encrypting: false,
}, nil
@ -101,7 +101,7 @@ func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncryp
Size: srcInfo.Size,
Annotations: annotations,
}
reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc)
reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
if err != nil {
return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
}

View File

@ -11,7 +11,6 @@ import (
"github.com/containers/image/v5/internal/image"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
@ -48,10 +47,10 @@ func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []
}
// copyMultipleImages copies some or all of an image list's instances, using
// policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
// c.policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
@ -61,7 +60,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel,
"Getting image list signatures",
"Checking if image list destination supports signatures")
if err != nil {
@ -94,12 +93,12 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if destIsDigestedReference {
cannotModifyManifestListReason = "Destination specifies a digest"
}
if options.PreserveDigests {
if c.options.PreserveDigests {
cannotModifyManifestListReason = "Instructed to preserve digests"
}
// Determine if we'll need to convert the manifest list to a different format.
forceListMIMEType := options.ForceManifestMIMEType
forceListMIMEType := c.options.ForceManifestMIMEType
switch forceListMIMEType {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
forceListMIMEType = manifest.DockerV2ListMediaType
@ -119,7 +118,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Copy each image, or just the ones we want to copy, in turn.
instanceDigests := updatedList.Instances()
instanceEdits := []internalManifest.ListEdit{}
instanceCopyList := prepareInstanceCopies(instanceDigests, options)
instanceCopyList := prepareInstanceCopies(instanceDigests, c.options)
c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
for i, instance := range instanceCopyList {
// Update instances to be edited by their `ListOperation` and
@ -129,17 +128,18 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false})
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}
// Record the result of a possible conversion here.
instanceEdits = append(instanceEdits, internalManifest.ListEdit{
ListOperation: internalManifest.ListOpUpdate,
UpdateOldDigest: instance.sourceDigest,
UpdateDigest: updatedManifestDigest,
UpdateSize: int64(len(updatedManifest)),
UpdateMediaType: updatedManifestType})
ListOperation: internalManifest.ListOpUpdate,
UpdateOldDigest: instance.sourceDigest,
UpdateDigest: updated.manifestDigest,
UpdateSize: int64(len(updated.manifest)),
UpdateCompressionAlgorithms: updated.compressionAlgorithms,
UpdateMediaType: updated.manifestMIMEType})
default:
return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
}
@ -204,7 +204,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
// Sign the manifest list.
newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity)
if err != nil {
return nil, err
}

View File

@ -84,6 +84,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
),
mpb.AppendDecorators(
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
decor.Name(" | "),
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
@ -94,6 +96,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
return &progressBar{
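
The added decorators come from github.com/vbauerster/mpb/v8. EwmaSpeed only produces useful output when the bar is fed per-increment timing, which the copy code does through its progress reader; a standalone sketch with the same append decorators (the work loop is a stand-in):

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.AppendDecorators(
			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
			decor.Name(" | "),
			decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
		),
	)
	for i := 0; i < 100; i++ {
		start := time.Now()
		time.Sleep(10 * time.Millisecond) // stand-in for real work
		// EwmaIncrBy feeds the decorator the timing it needs for the average.
		bar.EwmaIncrBy(1, time.Since(start))
	}
	p.Wait()
}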

View File

@ -13,20 +13,20 @@ import (
"github.com/containers/image/v5/transports"
)
// setupSigners initializes c.signers based on options.
func (c *copier) setupSigners(options *Options) error {
c.signers = append(c.signers, options.Signers...)
// c.signersToClose is intentionally not updated with options.Signers.
// setupSigners initializes c.signers.
func (c *copier) setupSigners() error {
c.signers = append(c.signers, c.options.Signers...)
// c.signersToClose is intentionally not updated with c.options.Signers.
// We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don't need
// to clean up any created signers on failure.
if options.SignBy != "" {
if c.options.SignBy != "" {
opts := []simplesigning.Option{
simplesigning.WithKeyFingerprint(options.SignBy),
simplesigning.WithKeyFingerprint(c.options.SignBy),
}
if options.SignPassphrase != "" {
opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase))
if c.options.SignPassphrase != "" {
opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase))
}
signer, err := simplesigning.NewSigner(opts...)
if err != nil {
@ -36,9 +36,9 @@ func (c *copier) setupSigners(options *Options) error {
c.signersToClose = append(c.signersToClose, signer)
}
if options.SignBySigstorePrivateKeyFile != "" {
if c.options.SignBySigstorePrivateKeyFile != "" {
signer, err := sigstore.NewSigner(
sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase),
sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase),
)
if err != nil {
return err
@ -50,13 +50,13 @@ func (c *copier) setupSigners(options *Options) error {
return nil
}
// sourceSignatures returns signatures from unparsedSource based on options,
// sourceSignatures returns signatures from unparsedSource,
// and verifies that they can be used (to avoid copying a large image when we
// can tell in advance that it would ultimately fail)
func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options,
func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage,
gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
var sigs []internalsig.Signature
if options.RemoveSignatures {
if c.options.RemoveSignatures {
sigs = []internalsig.Signature{}
} else {
c.Printf("%s\n", gettingSignaturesMessage)
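
For orientation, the simple-signing branch above reduces to building a signer from a GPG key fingerprint. A hedged sketch of the same construction outside the copier (the fingerprint is a placeholder):

package main

import (
	"github.com/containers/image/v5/signature/simplesigning"
)

func main() {
	opts := []simplesigning.Option{
		simplesigning.WithKeyFingerprint("0123456789ABCDEF"), // placeholder fingerprint
	}
	// simplesigning.WithPassphrase(...) would be appended here for a
	// passphrase-protected key, as in the hunk above.
	s, err := simplesigning.NewSigner(opts...)
	if err != nil {
		panic(err)
	}
	defer s.Close()
	// The resulting signer would be passed in via copy.Options.Signers.
}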

View File

@ -18,7 +18,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@ -30,40 +29,54 @@ import (
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
ociEncryptLayers *[]int
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
requireCompressionFormatMatch bool
}
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
type copySingleImageOptions struct {
requireCompressionFormatMatch bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
}
// copySingleImageResult carries data produced by copySingleImage
type copySingleImageResult struct {
manifest []byte
manifestMIMEType string
manifestDigest digest.Digest
compressionAlgorithms []compressiontypes.Algorithm
}
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate
// source image admissibility.
func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
}
// Please keep this policy check BEFORE reading any other information about the image.
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage)
if err != nil {
return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
@ -75,33 +88,33 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := unparsedToplevel.Manifest(ctx)
manifestList, _, err := c.unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
}
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, "", "", err
if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
return copySingleImageResult{}, err
}
sigs, err := c.sourceSignatures(ctx, src, options,
sigs, err := c.sourceSignatures(ctx, src,
"Getting image source signatures",
"Checking if image destination supports signatures")
if err != nil {
return nil, "", "", err
return copySingleImageResult{}, err
}
// Determine if we're allowed to modify the manifest.
@ -114,7 +127,7 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
if destIsDigestedReference {
cannotModifyManifestReason = "Destination specifies a digest"
}
if options.PreserveDigests {
if c.options.PreserveDigests {
cannotModifyManifestReason = "Instructed to preserve digests"
}
@ -123,13 +136,16 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
cannotModifyManifestReason: cannotModifyManifestReason,
requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
}
if options.DestinationCtx != nil {
if opts.compressionFormat != nil {
ic.compressionFormat = opts.compressionFormat
ic.compressionLevel = opts.compressionLevel
} else if c.options.DestinationCtx != nil {
// Note that compressionFormat and compressionLevel can be nil.
ic.compressionFormat = options.DestinationCtx.CompressionFormat
ic.compressionLevel = options.DestinationCtx.CompressionLevel
ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
}
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
@ -142,20 +158,20 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, "", "", err
return copySingleImageResult{}, err
}
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
forceManifestMIMEType: options.ForceManifestMIMEType,
forceManifestMIMEType: c.options.ForceManifestMIMEType,
requiresOCIEncryption: destRequiresOciEncryption,
cannotModifyManifestReason: ic.cannotModifyManifestReason,
})
if err != nil {
return nil, "", "", err
return copySingleImageResult{}, err
}
// We set up this part of ic.manifestUpdates quite early, not just around the
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
@ -169,27 +185,28 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
if c.options.OptimizeDestinationImageAlreadyExists {
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
if err != nil {
logrus.Warnf("Failed to compare destination image manifest: %v", err)
return nil, "", "", err
return copySingleImageResult{}, err
}
if isSrcDestManifestEqual {
if matchedResult != nil {
c.Printf("Skipping: image already present at destination\n")
return retManifest, retManifestType, retManifestDigest, nil
return *matchedResult, nil
}
}
}
if err := ic.copyLayers(ctx); err != nil {
return nil, "", "", err
compressionAlgos, err := ic.copyLayers(ctx)
if err != nil {
return copySingleImageResult{}, err
}
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
@ -197,8 +214,12 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = manifestConversionPlan.preferredMIMEType
manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
wipResult := copySingleImageResult{
manifest: manifestBytes,
manifestMIMEType: manifestConversionPlan.preferredMIMEType,
manifestDigest: manifestDigest,
}
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
@ -213,14 +234,14 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
return nil, "", "", err
return copySingleImageResult{}, err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let's bail out early and with a better error message.
if ic.cannotModifyManifestReason != "" {
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@ -236,34 +257,37 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
}
// We have successfully uploaded a manifest.
manifestBytes = attemptedManifest
retManifestDigest = attemptedManifestDigest
retManifestType = manifestMIMEType
wipResult = copySingleImageResult{
manifest: attemptedManifest,
manifestMIMEType: manifestMIMEType,
manifestDigest: attemptedManifestDigest,
}
errs = nil // Mark this as a success so that we don't abort below.
break
}
if errs != nil {
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
}
if targetInstance != nil {
targetInstance = &retManifestDigest
targetInstance = &wipResult.manifestDigest
}
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
if err != nil {
return nil, "", "", err
return copySingleImageResult{}, err
}
sigs = append(sigs, newSigs...)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err)
}
}
return manifestBytes, retManifestType, retManifestDigest, nil
wipResult.compressionAlgorithms = compressionAlgos
res := wipResult // We are done
return res, nil
}
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
@ -323,52 +347,69 @@ func (ic *imageCopier) noPendingManifestUpdates() bool {
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
}
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise.
func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
return nil, fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
if err != nil {
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
return false, nil, "", "", nil
logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
return nil, nil
}
defer destImageSource.Close()
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
return false, nil, "", "", nil
return nil, nil
}
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
return nil, fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
if srcManifestDigest != destManifestDigest {
return false, nil, "", "", nil
return nil, nil
}
compressionAlgos := set.New[string]()
for _, srcInfo := range ic.src.LayerInfos() {
compression := compressionAlgorithmFromMIMEType(srcInfo)
compressionAlgos.Add(compression.Name())
}
algos, err := algorithmsByNames(compressionAlgos.Values())
if err != nil {
return nil, err
}
// Destination and source manifests, types and digests should all be equivalent
return true, destManifest, destManifestType, destManifestDigest, nil
return &copySingleImageResult{
manifest: destManifest,
manifestMIMEType: destManifestType,
manifestDigest: srcManifestDigest,
compressionAlgorithms: algos,
}, nil
}
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) error {
func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return err
return nil, err
}
srcInfosUpdated := false
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if ic.cannotModifyManifestReason != "" {
return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
@ -384,7 +425,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// layer is empty.
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
return nil, err
}
manifestLayerInfos := man.LayerInfos()
@ -396,7 +437,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1
// does not support them.
@ -415,10 +456,10 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
if ic.c.options.OciEncryptLayers != nil {
encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.ociEncryptLayers {
for _, l := range *ic.c.options.OciEncryptLayers {
// if layer is negative, it is reverse indexed.
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
@ -450,14 +491,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return err
return nil, err
}
compressionAlgos := set.New[string]()
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
for i, cld := range data {
if cld.err != nil {
return cld.err
return nil, cld.err
}
if cld.destInfo.CompressionAlgorithm != nil {
compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name())
}
destInfos[i] = cld.destInfo
diffIDs[i] = cld.diffID
@ -472,7 +517,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
algos, err := algorithmsByNames(compressionAlgos.Values())
if err != nil {
return nil, err
}
return algos, nil
}
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
@ -577,6 +626,19 @@ type diffIDResult struct {
err error
}
func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
return &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
return &compression.Zstd
}
return nil
}
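As an in-package usage sketch (the BlobInfo literal below is hypothetical), this helper recovers a layer's compression from its manifest MIME type when the BlobInfo itself does not carry one:
srcInfo := types.BlobInfo{MediaType: manifest.DockerV2Schema2LayerMediaType} // hypothetical gzip-compressed schema2 layer
if algo := compressionAlgorithmFromMIMEType(srcInfo); algo != nil {
fmt.Println(algo.Name()) // "gzip"
}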
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
@ -588,17 +650,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// which uses the compression information to compute the updated MediaType values.
// (Sadly UpdatedImage() is documented to not update MediaTypes from
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
//
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
if srcInfo.CompressionAlgorithm == nil {
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
srcInfo.CompressionAlgorithm = &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
srcInfo.CompressionAlgorithm = &compression.Zstd
}
srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
}
ic.c.printCopyInfo("blob", srcInfo)
@ -608,7 +661,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it's not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let's not.
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
@ -623,12 +676,20 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
var requiredCompression *compressiontypes.Algorithm
var originalCompression *compressiontypes.Algorithm
if ic.requireCompressionFormatMatch {
requiredCompression = ic.compressionFormat
originalCompression = srcInfo.CompressionAlgorithm
}
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
RequiredCompression: requiredCompression,
OriginalCompression: originalCompression,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
@ -642,8 +703,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}()
// Throw an event that the layer has been skipped
if ic.c.progress != nil && ic.c.progressInterval > 0 {
ic.c.progress <- types.ProgressProperties{
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
ic.c.options.Progress <- types.ProgressProperties{
Event: types.ProgressEventSkipped,
Artifact: srcInfo,
}
@ -818,3 +879,16 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
// algorithmsByNames returns a slice of Algorithms for a slice of algorithm names
func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
result := []compressiontypes.Algorithm{}
for _, name := range names {
algo, err := compression.AlgorithmByName(name)
if err != nil {
return nil, err
}
result = append(result, algo)
}
return result, nil
}
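A brief in-package sketch of this helper; the names mirror what the compressionAlgos set collects during a copy:
algos, err := algorithmsByNames([]string{compressiontypes.GzipAlgorithmName, compressiontypes.ZstdAlgorithmName})
if err != nil {
return err // an unrecognized name surfaces from compression.AlgorithmByName
}
fmt.Println(len(algos)) // 2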

View File

@ -190,6 +190,9 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
}

View File

@ -321,13 +321,21 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, private.ReusedBlob{}, err
}
if haveBlob {
return true, reusedInfo, nil
if impl.OriginalBlobMatchesRequiredCompression(options) {
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, private.ReusedBlob{}, err
}
if haveBlob {
return true, reusedInfo, nil
}
} else {
originalCompression := "nil"
if options.OriginalCompression != nil {
originalCompression = options.OriginalCompression.Name()
}
logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), originalCompression)
}
// Then try reusing blobs from other locations.
@ -338,6 +346,19 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
continue
}
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
candidateCompression := "nil"
if compressionAlgorithm != nil {
candidateCompression = compressionAlgorithm.Name()
}
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), candidateCompression, candidateRepo.Name())
continue
}
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
@ -388,12 +409,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,

View File

@ -129,6 +129,9 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if err := d.archive.lock(); err != nil {
return false, private.ReusedBlob{}, err
}

View File

@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
if err != nil {
return nil, fmt.Errorf("creating temporary file: %w", err)
}

View File

@ -0,0 +1,20 @@
package impl
import (
"github.com/containers/image/v5/internal/private"
compression "github.com/containers/image/v5/pkg/compression/types"
)
// BlobMatchesRequiredCompression checks whether the caller requires a specific compression algorithm when selecting a blob;
// if so, it matches that requirement against the compression of the existing candidate blob
// (which can be nil, representing uncompressed or unknown).
func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression == nil {
return true // no requirement imposed
}
return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
}
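// OriginalBlobMatchesRequiredCompression returns true if the compression of the original blob
// (TryReusingBlobOptions.OriginalCompression) satisfies a RequiredCompression constraint, if any was set.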
func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
}
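For illustration, a hedged sketch of the matching rule (here compression refers to the exported Algorithm values of pkg/compression, and the options literal is hypothetical):
opts := private.TryReusingBlobOptions{RequiredCompression: &compression.Zstd}
fmt.Println(impl.BlobMatchesRequiredCompression(opts, &compression.Zstd)) // true: names match
fmt.Println(impl.BlobMatchesRequiredCompression(opts, &compression.Gzip)) // false: algorithm differs
fmt.Println(impl.BlobMatchesRequiredCompression(opts, nil))               // false: candidate uncompressed/unknown
opts.RequiredCompression = nil
fmt.Println(impl.BlobMatchesRequiredCompression(opts, nil))               // true: no requirement imposed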

View File

@ -64,6 +64,9 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if options.RequiredCompression != nil {
return false, private.ReusedBlob{}, nil
}
reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
if !reused || err != nil {
return reused, private.ReusedBlob{}, err

View File

@ -5,6 +5,7 @@ import (
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -57,11 +58,20 @@ func (list *Schema2ListPublic) Instances() []digest.Digest {
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
ret := ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
ret.ReadOnly.Platform = &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSVersion: manifest.Platform.OSVersion,
OSFeatures: manifest.Platform.OSFeatures,
Variant: manifest.Platform.Variant,
}
return ret, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)

View File

@ -68,6 +68,12 @@ type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
// ReadOnly fields: may be set by Instance(), ignored by UpdateInstance()
ReadOnly struct {
Platform *imgspecv1.Platform
Annotations map[string]string
CompressionAlgorithmNames []string
}
}
type ListOp int

View File

@ -53,11 +53,15 @@ func (index *OCI1IndexPublic) Instances() []digest.Digest {
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
ret := ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
ret.ReadOnly.Platform = manifest.Platform
ret.ReadOnly.Annotations = manifest.Annotations
ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
return ret, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
@ -78,14 +82,29 @@ func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
return index.editInstances(editInstances)
}
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) {
func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
result := make([]string, 0, 1)
if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
result = append(result, compression.ZstdAlgorithmName)
}
// No compression was detected, hence assume the instance has the default compression `Gzip`
if len(result) == 0 {
result = append(result, compression.GzipAlgorithmName)
}
return result
}
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
// TODO: This should also delete the algorithm if the map already contains an algorithm and the compressionAlgorithms
// list has a different one. To do that, we would need to modify the callers to always provide a reliable
// and full compressionAlgorithms list.
if *annotationsMap == nil && len(compressionAlgorithms) > 0 {
*annotationsMap = map[string]string{}
}
for _, algo := range compressionAlgorithms {
switch algo.Name() {
case compression.ZstdAlgorithmName:
annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
default:
continue
}
@ -130,13 +149,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
}
}
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations)
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations)
case ListOpAdd:
annotations := map[string]string{}
if editInstance.AddAnnotations != nil {
annotations = maps.Clone(editInstance.AddAnnotations)
}
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations)
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
addedEntries = append(addedEntries, imgspecv1.Descriptor{
MediaType: editInstance.AddMediaType,
Size: editInstance.AddSize,

View File

@ -112,10 +112,11 @@ type TryReusingBlobOptions struct {
// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
// if they use internal/imagedestination/impl.Compat;
// in that case, they will all be consistently zero-valued.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
}
// ReusedBlob is information about a blob reused in a destination.

View File

@ -15,7 +15,7 @@ import (
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
// If an error occurs, inputInfo is not modified.
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
if err != nil {
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
}

View File

@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
// DO NOT change this, instead see unixTempDirForBigFiles above.
const builtinUnixTempDirForBigFiles = "/var/tmp"
const prefix = "container_images_"
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp,
// which on systemd-based systems could be the unsuitable tmpfs filesystem.
func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
func temporaryDirectoryForBigFiles(sys *types.SystemContext) string {
if sys != nil && sys.BigFilesTemporaryDir != "" {
return sys.BigFilesTemporaryDir
}
@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
}
return temporaryDirectoryForBigFiles
}
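// CreateBigFileTemp creates a temporary file for big files, named with the
// container_images_ prefix, and returns the open file.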
func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) {
return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
}
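// MkDirBigFileTemp creates a temporary directory for big files, named with the
// container_images_ prefix, and returns its path.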
func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) {
return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
}
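A usage sketch of the new wrappers, matching the call-site conversions in the files below; the name arguments and resulting paths are illustrative:
f, err := tmpdir.CreateBigFileTemp(sys, "docker-tar") // e.g. /var/tmp/container_images_docker-tar2941586457
if err != nil {
return err
}
defer f.Close()
defer os.Remove(f.Name())
dir, err := tmpdir.MkDirBigFileTemp(sys, "oci") // e.g. /var/tmp/container_images_oci1209731045
if err != nil {
return err
}
defer os.RemoveAll(dir)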

View File

@ -156,7 +156,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the OCI reference of the image.
// If SystemContext.BigFilesTemporaryDir is not "", it overrides the temporary directory to use for storing big files.
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
if err != nil {
return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
}

View File

@ -172,6 +172,9 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}

View File

@ -65,6 +65,10 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
}, nil
}
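// close releases idle connections held by the client's HTTP transport.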
func (c *openshiftClient) close() {
c.httpClient.CloseIdleConnections()
}
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
requestURL := *c.baseURL

View File

@ -71,7 +71,9 @@ func (d *openshiftImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *openshiftImageDestination) Close() error {
return d.docker.Close()
err := d.docker.Close()
d.client.close()
return err
}
func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {

View File

@ -60,14 +60,15 @@ func (s *openshiftImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *openshiftImageSource) Close() error {
var err error
if s.docker != nil {
err := s.docker.Close()
err = s.docker.Close()
s.docker = nil
return err
}
return nil
s.client.close()
return err
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).

View File

@ -335,6 +335,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {

View File

@ -237,6 +237,9 @@ func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)
if err != nil || present {
return present, reusedInfo, err

View File

@ -519,11 +519,12 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
if sys.LegacyFormatAuthFilePath != "" {
return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
}
if sys.RootForImplicitAbsolutePaths != "" {
// Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME
if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" {
return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
}
}
if goOS == "windows" || goOS == "darwin" {
if goOS != "linux" {
return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
}

View File

@ -73,7 +73,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
_ = sifImg.UnloadContainer()
}()
workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif")
if err != nil {
return nil, fmt.Errorf("creating temp directory: %w", err)
}

View File

@ -95,7 +95,7 @@ type addedLayerInfo struct {
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
// it's time to Commit() the image
func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
directory, err := tmpdir.MkDirBigFileTemp(sys, "storage")
if err != nil {
return nil, fmt.Errorf("creating a temporary directory: %w", err)
}
@ -307,6 +307,9 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err

View File

@ -124,7 +124,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
}
defer rc.Close()
tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
if err != nil {
return nil, 0, err
}

View File

@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 26
VersionMinor = 27
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)
// Version is the specification version that the package types support.

View File

@ -0,0 +1,82 @@
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"os"
"sync/atomic"
"time"
)
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
var lastWriterIDCounter uint64 // Private state for newLastWriterID
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return LastWrite{
state: res,
}
}
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// equals returns true if lw matches other.
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}
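An in-package sketch of the value semantics (the lock-file I/O that normally carries the serialized bytes is elided):
lw := newLastWrite()
restored := newLastWriteFromData(lw.serialize()) // round-trips through the lock file contents
fmt.Println(lw.equals(restored))                 // true: same write
fmt.Println(lw.equals(newLastWrite()))           // false: every newLastWrite is distinct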

View File

@ -2,6 +2,7 @@ package lockfile
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
@ -54,6 +55,38 @@ type Locker interface {
AssertLockedForWriting()
}
type lockType byte
const (
readLock lockType = iota
writeLock
)
// LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
lockType lockType
locked bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd fileHandle
}
var (
lockFiles map[string]*LockFile
lockFilesLock sync.Mutex
@ -91,6 +124,156 @@ func GetROLockfile(path string) (Locker, error) {
return GetROLockFile(path)
}
// Lock locks the lockfile as a writer. It panics if the lock is read-only.
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(writeLock)
}
}
// RLock locks the lockfile as a reader.
func (l *LockFile) RLock() {
l.lock(readLock)
}
// Unlock unlocks the lockfile.
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics, and panicking makes such bugs visible.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unlockAndCloseHandle(l.fd)
}
if l.lockType == readLock {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
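// AssertLocked panics if the lock is not held (for reading or writing).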
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this “AssertLocked” method, which exists only for sanity checks.
// Don't even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
// with no possible writers.
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
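// AssertLockedForWriting panics if the lock is not held for writing.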
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked()
// Like AssertLocked, don't even bother with l.stateMutex.
if l.lockType == readLock {
panic("internal error: lock is not held for writing")
}
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
oldLW := l.lw
// Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
l.lw = currentLW
return modified, nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.lockType == readLock) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = lw
return nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
// getLockFile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
@ -128,3 +311,99 @@ func getLockfile(path string, ro bool) (*LockFile, error) {
lockFiles[cleanPath] = lockFile
return lockFile, nil
}
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unlockAndCloseHandle(fd)
lType := writeLock
if ro {
lType = readLock
}
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
lockType: lType,
locked: false,
}, nil
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd fileHandle, err error) {
flags := os.O_CREATE
if ro {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err = openHandle(path, flags)
if err == nil {
return fd, nil
}
// The directory of the lockfile seems to have been removed; try to create it.
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// lock locks the lockfile via a platform file-locking syscall based on the
// specified type.
func (l *LockFile) lock(lType lockType) {
if lType == readLock {
l.rwMutex.RLock()
} else {
l.rwMutex.Lock()
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(err)
}
l.fd = fd
// Optimization: only use the (expensive) syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
lockHandle(l.fd, lType)
}
l.lockType = lType
l.locked = true
l.counter++
}
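As a consumer-side sketch pairing these primitives per the ModifiedSince contract above; the lock path, state holder, and reload function are hypothetical:
lock, err := lockfile.GetLockFile("/var/lib/example/data.lock") // hypothetical path
if err != nil {
return err
}
lock.Lock() // writer: mutate the protected data, then record the write
if _, err := lock.RecordWrite(); err != nil {
lock.Unlock()
return err
}
lock.Unlock()
lock.RLock() // reader: reload only if someone wrote since our recorded LastWrite
defer lock.Unlock()
lw, modified, err := lock.ModifiedSince(state.lastWrite) // state is a hypothetical holder of a LastWrite
if err != nil {
return err
}
if modified {
if err := reload(); err != nil { // hypothetical reload of the protected data
return err
}
}
state.lastWrite = lw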

View File

@ -4,297 +4,13 @@
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
locktype int16
locked bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd uintptr
}
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
var lastWriterIDCounter uint64 // Private state for newLastWriterID
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return LastWrite{
state: res,
}
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// equals returns true if lw matches other.
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
flags := unix.O_CLOEXEC | os.O_CREATE
if ro {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err = unix.Open(path, flags, 0o644)
if err == nil {
return fd, nil
}
// The directory of the lockfile seems to have been removed; try to create it.
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unix.Close(fd)
locktype := unix.F_WRLCK
if ro {
locktype = unix.F_RDLCK
}
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
locktype: int16(locktype),
locked: false,
}, nil
}
// lock locks the lockfile via fcntl(2) based on the specified type and
// command.
func (l *LockFile) lock(lType int16) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
switch lType {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
l.rwMutex.Lock()
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(err)
}
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = lType
l.locked = true
l.counter++
}
// Lock locks the lockfile as a writer. It panics if the lock is read-only.
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(unix.F_WRLCK)
}
}
// RLock locks the lockfile as a reader.
func (l *LockFile) RLock() {
l.lock(unix.F_RDLCK)
}
// Unlock unlocks the lockfile.
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics, and panicking makes such bugs visible.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unix.Close(int(l.fd))
}
if l.locktype == unix.F_RDLCK {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this “AssertLocked” method, which exists only for sanity checks.
// Don't even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
// with no possible writers.
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked()
// Like AssertLocked, don't even bother with l.stateMutex.
if l.locktype != unix.F_WRLCK {
panic("internal error: lock is not held for writing")
}
}
type fileHandle uintptr
// GetLastWrite returns a LastWrite value corresponding to the current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
@ -341,81 +57,6 @@ func (l *LockFile) RecordWrite() (LastWrite, error) {
return lw, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = lw
return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
oldLW := l.lw
// Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
l.lw = currentLW
return modified, nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
@ -426,3 +67,29 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
touched := time.Unix(mtim.Unix())
return when.Before(touched)
}
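// openHandle opens path with the given flags plus O_CLOEXEC and returns a platform file handle.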
func openHandle(path string, mode int) (fileHandle, error) {
mode |= unix.O_CLOEXEC
fd, err := unix.Open(path, mode, 0o644)
return fileHandle(fd), err
}
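// lockHandle acquires an fcntl(2) advisory lock of the requested type on fd, retrying until it succeeds.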
func lockHandle(fd fileHandle, lType lockType) {
fType := unix.F_RDLCK
if lType != readLock {
fType = unix.F_WRLCK
}
lk := unix.Flock_t{
Type: int16(fType),
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
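// unlockAndCloseHandle closes fd, which also releases the advisory lock.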
func unlockAndCloseHandle(fd fileHandle) {
unix.Close(int(fd))
}

View File

@ -5,81 +5,19 @@ package lockfile
import (
"os"
"sync"
"time"
"golang.org/x/sys/windows"
)
// createLockFileForPath returns a *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
return &LockFile{locked: false}, nil
}
const (
reserved = 0
allBytes = ^uint32(0)
)
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
mu sync.Mutex
file string
locked bool
}
type fileHandle windows.Handle
// LastWrite is an opaque identifier of the last write to some *LockFile.
// Consumers of a *LockFile can use it to determine whether the lock indicates changes
// since the last check.
// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes.
type LastWrite struct {
// Nothing: The Windows "implementation" does not actually track writes.
}
func (l *LockFile) Lock() {
l.mu.Lock()
l.locked = true
}
func (l *LockFile) RLock() {
l.mu.Lock()
l.locked = true
}
func (l *LockFile) Unlock() {
l.locked = false
l.mu.Unlock()
}
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this "AssertLocked" method, which exists only for sanity checks.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked() // The current implementation does not distinguish between read and write locks.
}
// GetLastWrite() returns a LastWrite value corresponding to current state of the lock.
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
@ -87,7 +25,18 @@ func (l *LockFile) AssertLockedForWriting() {
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
return LastWrite{}, nil
contents := make([]byte, lastWriterIDSize)
ol := new(windows.Overlapped)
var n uint32
err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol)
if err != nil && err != windows.ERROR_HANDLE_EOF {
return LastWrite{}, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet).
contents = contents[:n]
return newLastWriteFromData(contents), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
@ -102,47 +51,22 @@ func (l *LockFile) GetLastWrite() (LastWrite, error) {
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
return LastWrite{}, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
return LastWrite{}, false, nil
}
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
return false, nil
}
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
return nil
}
func (l *LockFile) IsReadWrite() bool {
return false
l.AssertLockedForWriting()
lw := newLastWrite()
lockContents := lw.serialize()
ol := new(windows.Overlapped)
var n uint32
err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol)
if err != nil {
return LastWrite{}, err
}
if int(n) != len(lockContents) {
return LastWrite{}, windows.ERROR_DISK_FULL
}
return lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time.
func (l *LockFile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
@ -150,3 +74,26 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
}
return when.Before(stat.ModTime())
}
func openHandle(path string, mode int) (fileHandle, error) {
mode |= windows.O_CLOEXEC
fd, err := windows.Open(path, mode, windows.S_IWRITE)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType lockType) {
flags := 0
if lType != readLock {
flags = windows.LOCKFILE_EXCLUSIVE_LOCK
}
ol := new(windows.Overlapped)
if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
panic(err)
}
}
func unlockAndCloseHandle(fd fileHandle) {
ol := new(windows.Overlapped)
windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
windows.Close(windows.Handle(fd))
}
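The Windows half mirrors this with byte-range locks over the whole file: a zero flags value requests a shared lock, LOCKFILE_EXCLUSIVE_LOCK a writer, and UnlockFileEx followed by Close releases it. A matching sketch, assuming the package-internal readLock constant and an illustrative path:

fd, err := openHandle(`C:\ProgramData\example.lock`, windows.O_CREAT|windows.O_RDWR)
if err != nil {
	return err
}
lockHandle(fd, readLock)       // shared lock; concurrent readers are allowed
defer unlockAndCloseHandle(fd) // UnlockFileEx, then Close
// ... read the lock-protected data here ...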

View File

@ -26,7 +26,7 @@ func isValidCredsMessage(msg string) error {
// Store uses an external program to save credentials.
func Store(program ProgramFunc, creds *credentials.Credentials) error {
cmd := program("store")
cmd := program(credentials.ActionStore)
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(creds); err != nil {
@ -50,7 +50,7 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {
// Get executes an external program to get the credentials from a native store.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
cmd := program("get")
cmd := program(credentials.ActionGet)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
@ -81,7 +81,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error
// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
cmd := program("erase")
cmd := program(credentials.ActionErase)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
@ -99,7 +99,7 @@ func Erase(program ProgramFunc, serverURL string) error {
// List executes a program to list server credentials in the native store.
func List(program ProgramFunc) (map[string]string, error) {
cmd := program("list")
cmd := program(credentials.ActionList)
cmd.Input(strings.NewReader("unused"))
out, err := cmd.Output()
if err != nil {

View File

@ -1,11 +1,9 @@
package client
import (
"fmt"
"io"
"os"
exec "golang.org/x/sys/execabs"
"os/exec"
)
// Program is an interface to execute external programs.
@ -31,27 +29,26 @@ func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
programCmd := exec.Command(commandName, args...)
programCmd.Env = os.Environ()
if env != nil {
for k, v := range *env {
programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
programCmd.Env = append(programCmd.Environ(), k+"="+v)
}
}
programCmd.Stderr = os.Stderr
return programCmd
}
// Shell invokes shell commands to talk with a remote credentials helper.
// Shell invokes shell commands to talk with a remote credentials-helper.
type Shell struct {
cmd *exec.Cmd
}
// Output returns responses from the remote credentials helper.
// Output returns responses from the remote credentials-helper.
func (s *Shell) Output() ([]byte, error) {
return s.cmd.Output()
}
// Input sets the input to send to a remote credentials helper.
// Input sets the input to send to a remote credentials-helper.
func (s *Shell) Input(in io.Reader) {
s.cmd.Stdin = in
}
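For programs that drive helpers through this client package, the typed actions change nothing at the call sites. A minimal sketch; the helper binary name and registry URL are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Invokes the external binary "docker-credential-pass" (illustrative);
	// any helper on $PATH that speaks the store/get/erase/list protocol works.
	p := client.NewShellProgramFunc("docker-credential-pass")
	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("username:", creds.Username)
}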

View File

@ -10,6 +10,20 @@ import (
"strings"
)
// Action defines the name of an action (sub-command) supported by a
// credential-helper binary. It is an alias for "string", mostly for
// convenience.
type Action = string
// List of actions (sub-commands) supported by credential-helper binaries.
const (
ActionStore Action = "store"
ActionGet Action = "get"
ActionErase Action = "erase"
ActionList Action = "list"
ActionVersion Action = "version"
)
// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
ServerURL string
@ -43,42 +57,52 @@ func SetCredsLabel(label string) {
CredsLabel = label
}
// Serve initializes the credentials helper and parses the action argument.
// Serve initializes the credentials-helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
var err error
if len(os.Args) != 2 {
err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
_, _ = fmt.Fprintln(os.Stdout, usage())
os.Exit(1)
}
if err == nil {
err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
switch os.Args[1] {
case "--version", "-v":
_ = PrintVersion(os.Stdout)
os.Exit(0)
case "--help", "-h":
_, _ = fmt.Fprintln(os.Stdout, usage())
os.Exit(0)
}
if err != nil {
fmt.Fprintf(os.Stdout, "%v\n", err)
if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
_, _ = fmt.Fprintln(os.Stdout, err)
os.Exit(1)
}
}
// HandleCommand uses a helper and a key to run a credential action.
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
switch key {
case "store":
func usage() string {
return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name)
}
// HandleCommand runs a helper to execute a credential action.
func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
switch action {
case ActionStore:
return Store(helper, in)
case "get":
case ActionGet:
return Get(helper, in, out)
case "erase":
case ActionErase:
return Erase(helper, in)
case "list":
case ActionList:
return List(helper, out)
case "version":
case ActionVersion:
return PrintVersion(out)
default:
return fmt.Errorf("%s: unknown action: %s", Name, action)
}
return fmt.Errorf("Unknown credential action `%s`", key)
}
// Store uses a helper and an input reader to save credentials.
@ -132,18 +156,17 @@ func Get(helper Helper, reader io.Reader, writer io.Writer) error {
return err
}
resp := Credentials{
buffer.Reset()
err = json.NewEncoder(buffer).Encode(Credentials{
ServerURL: serverURL,
Username: username,
Secret: secret,
}
buffer.Reset()
if err := json.NewEncoder(buffer).Encode(resp); err != nil {
})
if err != nil {
return err
}
fmt.Fprint(writer, buffer.String())
_, _ = fmt.Fprint(writer, buffer.String())
return nil
}
@ -181,6 +204,6 @@ func List(helper Helper, writer io.Writer) error {
// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
_, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
return nil
}
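A helper binary only implements the Helper interface and hands it to Serve; the action parsing above does the rest. A minimal, illustrative in-memory helper (not suitable for real secrets):

package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

type memHelper struct{ creds map[string][2]string }

func (m *memHelper) Add(c *credentials.Credentials) error {
	m.creds[c.ServerURL] = [2]string{c.Username, c.Secret}
	return nil
}

func (m *memHelper) Delete(serverURL string) error {
	delete(m.creds, serverURL)
	return nil
}

func (m *memHelper) Get(serverURL string) (string, string, error) {
	if v, ok := m.creds[serverURL]; ok {
		return v[0], v[1], nil
	}
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (m *memHelper) List() (map[string]string, error) {
	out := map[string]string{}
	for url, v := range m.creds {
		out[url] = v[0]
	}
	return out, nil
}

func main() {
	credentials.Serve(&memHelper{creds: map[string][2]string{}})
}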

View File

@ -1,5 +1,7 @@
package credentials
import "errors"
const (
// ErrCredentialsNotFound standardizes the not found error, so every helper returns
// the same message and docker can handle it properly.
@ -21,6 +23,11 @@ func (errCredentialsNotFound) Error() string {
return errCredentialsNotFoundMessage
}
// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface.
//
// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound
func (errCredentialsNotFound) NotFound() {}
// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {
@ -30,8 +37,8 @@ func NewErrCredentialsNotFound() error {
// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
_, ok := err.(errCredentialsNotFound)
return ok
var target errCredentialsNotFound
return errors.As(err, &target)
}
// IsErrCredentialsNotFoundMessage returns true if the error
@ -53,6 +60,12 @@ func (errCredentialsMissingServerURL) Error() string {
return errCredentialsMissingServerURLMessage
}
// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingServerURL) InvalidParameter() {}
// errCredentialsMissingUsername represents an error raised
// when the credentials object has no username or when no
// username is provided to a credentials operation requiring
@ -63,6 +76,12 @@ func (errCredentialsMissingUsername) Error() string {
return errCredentialsMissingUsernameMessage
}
// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingUsername) InvalidParameter() {}
// NewErrCredentialsMissingServerURL creates a new error for
// errCredentialsMissingServerURL.
func NewErrCredentialsMissingServerURL() error {
@ -78,8 +97,8 @@ func NewErrCredentialsMissingUsername() error {
// IsCredentialsMissingServerURL returns true if the error
// was an errCredentialsMissingServerURL.
func IsCredentialsMissingServerURL(err error) bool {
_, ok := err.(errCredentialsMissingServerURL)
return ok
var target errCredentialsMissingServerURL
return errors.As(err, &target)
}
// IsCredentialsMissingServerURLMessage checks for an
@ -91,8 +110,8 @@ func IsCredentialsMissingServerURLMessage(err string) bool {
// IsCredentialsMissingUsername returns true if the error
// was an errCredentialsMissingUsername.
func IsCredentialsMissingUsername(err error) bool {
_, ok := err.(errCredentialsMissingUsername)
return ok
var target errCredentialsMissingUsername
return errors.As(err, &target)
}
// IsCredentialsMissingUsernameMessage checks for an
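Because the predicates now use errors.As, they also recognize wrapped errors, which the old direct type assertions missed. A small illustration:

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Wrapping with %w used to defeat the old type-assertion check;
	// errors.As walks the wrap chain.
	err := fmt.Errorf("fetching creds: %w", credentials.NewErrCredentialsNotFound())
	fmt.Println(credentials.IsErrCredentialsNotFound(err)) // true
}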

View File

@ -39,19 +39,20 @@ var (
OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
// CI extensions
OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
)
// Extensions contains all custom x509 extensions defined by Fulcio
@ -128,6 +129,9 @@ type Extensions struct {
// Run Invocation URL to uniquely identify the build execution.
RunInvocationURI string // 1.3.6.1.4.1.57264.1.21
// Source repository visibility at the time of signing the certificate.
SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22
}
func (e Extensions) Render() ([]pkix.Extension, error) {
@ -320,6 +324,16 @@ func (e Extensions) Render() ([]pkix.Extension, error) {
Value: val,
})
}
if e.SourceRepositoryVisibilityAtSigning != "" {
val, err := asn1.MarshalWithParams(e.SourceRepositoryVisibilityAtSigning, "utf8")
if err != nil {
return nil, err
}
exts = append(exts, pkix.Extension{
Id: OIDSourceRepositoryVisibilityAtSigning,
Value: val,
})
}
return exts, nil
}
@ -399,6 +413,10 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) {
if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning):
if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil {
return Extensions{}, err
}
}
}
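Signers populate the new field like any other Fulcio extension and render it into the certificate; verifiers get it back through the same parsing path above. A minimal sketch (the issuer and visibility values are illustrative):

package main

import (
	"fmt"

	"github.com/sigstore/fulcio/pkg/certificate"
)

func main() {
	e := certificate.Extensions{
		Issuer:                              "https://token.actions.githubusercontent.com",
		SourceRepositoryVisibilityAtSigning: "public", // OID 1.3.6.1.4.1.57264.1.22
	}
	exts, err := e.Render()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(exts), "pkix extensions rendered")
}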

View File

@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
// *Status`, or if err wraps a type satisfying this, the appropriate Status is
// returned. For wrapped errors, the message returned contains the entire
// err.Error() text and not just the wrapped status.
// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
// errors, the message returned contains the entire err.Error() text and not
// just the wrapped status. In that case, ok is true.
//
// - If err is nil, a Status is returned with codes.OK and no message.
// - If err is nil, a Status is returned with codes.OK and no message, and ok
// is true.
//
// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
// returns nil (which maps to codes.OK), or if err wraps a type
// satisfying this, a Status is returned with codes.Unknown and err's
// Error() message, and ok is false.
//
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
@ -92,10 +99,24 @@ func FromError(err error) (s *Status, ok bool) {
}
type grpcstatus interface{ GRPCStatus() *Status }
if gs, ok := err.(grpcstatus); ok {
if gs.GRPCStatus() == nil {
// Error has status nil, which maps to codes.OK. There
// is no sensible behavior for this, so we turn it into
// an error with codes.Unknown and discard the existing
// status.
return New(codes.Unknown, err.Error()), false
}
return gs.GRPCStatus(), true
}
var gs grpcstatus
if errors.As(err, &gs) {
if gs.GRPCStatus() == nil {
// Error wraps an error that has status nil, which maps
// to codes.OK. There is no sensible behavior for this,
// so we turn it into an error with codes.Unknown and
// discard the existing status.
return New(codes.Unknown, err.Error()), false
}
p := gs.GRPCStatus().Proto()
p.Message = err.Error()
return status.FromProto(p), true
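The nil-GRPCStatus guard is observable from the caller's side: FromError now returns a usable codes.Unknown status instead of propagating a nil *Status. A small illustration with a hypothetical error type:

package main

import (
	"fmt"

	"google.golang.org/grpc/status"
)

// nilStatusErr is a hypothetical error whose GRPCStatus() returns nil.
type nilStatusErr struct{}

func (nilStatusErr) Error() string              { return "boom" }
func (nilStatusErr) GRPCStatus() *status.Status { return nil }

func main() {
	s, ok := status.FromError(nilStatusErr{})
	fmt.Println(s.Code(), ok) // Unknown false
}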

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.56.1"
const Version = "1.56.2"

22
vendor/modules.txt vendored
View File

@ -134,7 +134,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.3.0
## explicit; go 1.20
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874
# github.com/containers/buildah v1.31.1-0.20230722114901-5ece066f82c6
## explicit; go 1.18
github.com/containers/buildah
github.com/containers/buildah/bind
@ -157,7 +157,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901
# github.com/containers/common v0.55.1-0.20230721175448-664d013a6ae2
## explicit; go 1.18
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@ -215,7 +215,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.26.1
# github.com/containers/image/v5 v5.26.1-0.20230721194716-30c87d4a5b8d
## explicit; go 1.18
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -322,7 +322,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86
# github.com/containers/storage v1.48.1-0.20230721123825-4a3a3019d765
## explicit; go 1.19
github.com/containers/storage
github.com/containers/storage/drivers
@ -462,8 +462,8 @@ github.com/docker/docker/pkg/pools
github.com/docker/docker/pkg/process
github.com/docker/docker/pkg/stdcopy
github.com/docker/docker/pkg/system
# github.com/docker/docker-credential-helpers v0.7.0
## explicit; go 1.18
# github.com/docker/docker-credential-helpers v0.8.0
## explicit; go 1.19
github.com/docker/docker-credential-helpers/client
github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
@ -803,7 +803,7 @@ github.com/opencontainers/go-digest
## explicit; go 1.18
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.1.7 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
# github.com/opencontainers/runc v1.1.8 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
## explicit; go 1.17
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/cgroups
@ -888,7 +888,7 @@ github.com/seccomp/libseccomp-golang
# github.com/segmentio/ksuid v1.0.4
## explicit; go 1.12
github.com/segmentio/ksuid
# github.com/sigstore/fulcio v1.3.2
# github.com/sigstore/fulcio v1.4.0
## explicit; go 1.20
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
@ -1086,7 +1086,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.9.0
# golang.org/x/oauth2 v0.10.0
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/internal
@ -1157,10 +1157,10 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.56.1
# google.golang.org/grpc v1.56.2
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes