vendor: update containers/{buildah,common,image,storage}
The change in healthcheck_run_test.go depends on the containers/image change:

    commit b6afa8ca7b324aca8fd5a7b5b206fc05c0c04874
    Author: Mikhail Sokolov <msokolov@evolution.com>
    Date:   Fri Mar 15 13:37:44 2024 +0200

        Add support for Docker HealthConfig.StartInterval (v25.0.0+)

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
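Context for the healthcheck test change further down (the expected inspect output gains an extra "0s"): the referenced containers/image commit adds a StartInterval field to the Docker health-check configuration. A rough sketch of the resulting shape is below; the field names and ordering are assumptions inferred from how the struct prints in the updated test expectation — only the addition of StartInterval itself is confirmed by the commit above.

```go
package sketch

import "time"

// HealthConfig is an illustrative sketch of a Docker-compatible health-check
// configuration after the change above. Only StartInterval is confirmed new;
// the surrounding fields and their order are assumptions.
type HealthConfig struct {
	Test          []string      // e.g. {"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
	StartPeriod   time.Duration // grace period before failures are counted
	StartInterval time.Duration // added for Docker API v25.0.0+: probe interval during the start period
	Interval      time.Duration // time between checks once started
	Timeout       time.Duration // per-check timeout
	Retries       int           // consecutive failures before marking unhealthy
}
```

Printing such a struct with %v yields one more duration, which is why the expected string in healthcheck_run_test.go changes from "... 0s 5m0s 3s 0}" to "... 0s 0s 5m0s 3s 0}".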
go.mod (35 changed lines)
@@ -25,15 +25,15 @@ require (
github.com/checkpoint-restore/checkpointctl v1.1.0
github.com/checkpoint-restore/go-criu/v7 v7.1.0
github.com/containernetworking/plugins v1.4.0
-github.com/containers/buildah v1.35.1-0.20240318192459-e64e6cc09dfd
-github.com/containers/common v0.58.1-0.20240403123718-735c922b53c4
+github.com/containers/buildah v1.35.1-0.20240412112838-e393e57728f5
+github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363
-github.com/containers/image/v5 v5.30.0
+github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f
github.com/containers/libhvee v0.7.1
github.com/containers/ocicrypt v1.1.10
github.com/containers/psgo v1.9.0
-github.com/containers/storage v1.53.0
+github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
@@ -84,7 +84,7 @@ require (
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.9
golang.org/x/crypto v0.22.0
-golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
+golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0
golang.org/x/net v0.24.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.19.0
@@ -117,14 +117,14 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/containernetworking/cni v1.1.2 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c // indirect
+github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 // indirect
github.com/coreos/go-oidc/v3 v3.9.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
-github.com/distribution/reference v0.5.0 // indirect
+github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -137,14 +137,14 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
-github.com/go-openapi/errors v0.21.1 // indirect
+github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
-github.com/go-openapi/strfmt v0.22.2 // indirect
-github.com/go-openapi/swag v0.22.10 // indirect
+github.com/go-openapi/strfmt v0.23.0 // indirect
+github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -164,7 +164,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
-github.com/klauspost/compress v1.17.7 // indirect
+github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
@@ -203,10 +203,10 @@ require (
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sigstore/fulcio v1.4.3 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
-github.com/sigstore/sigstore v1.8.2 // indirect
+github.com/sigstore/sigstore v1.8.3 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-github.com/sylabs/sif/v2 v2.15.1 // indirect
+github.com/sylabs/sif/v2 v2.15.2 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -214,7 +214,7 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
-github.com/ulikunitz/xz v0.5.11 // indirect
+github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
@@ -227,11 +227,10 @@ require (
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
golang.org/x/arch v0.7.0 // indirect
-golang.org/x/mod v0.16.0 // indirect
-golang.org/x/oauth2 v0.18.0 // indirect
+golang.org/x/mod v0.17.0 // indirect
+golang.org/x/oauth2 v0.19.0 // indirect
golang.org/x/time v0.5.0 // indirect
-golang.org/x/tools v0.19.0 // indirect
-google.golang.org/appengine v1.6.8 // indirect
+golang.org/x/tools v0.20.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/grpc v1.62.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
go.sum (74 changed lines)
@@ -74,28 +74,28 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
-github.com/containers/buildah v1.35.1-0.20240318192459-e64e6cc09dfd h1:QVUSJsMYYUIQmMi+PU9NYXpbk/lgz0Xx6/naihFHFBQ=
-github.com/containers/buildah v1.35.1-0.20240318192459-e64e6cc09dfd/go.mod h1:kJEmpENlkUrZ39k4jVJC9RxDNH30qxSsfEOar4la8Ec=
-github.com/containers/common v0.58.1-0.20240403123718-735c922b53c4 h1:lj/tku4jvMnYcDmRIz182mPkI99CDK7Zvh4eN6NhR/k=
-github.com/containers/common v0.58.1-0.20240403123718-735c922b53c4/go.mod h1:10Y0+fVkDetxuizCMziHDUBbCUR87tgz82oHGCnhi4g=
+github.com/containers/buildah v1.35.1-0.20240412112838-e393e57728f5 h1:ucOnAzlQRjgDogeTTByJ45E1fW/On2CYc1WH4XmcHkQ=
+github.com/containers/buildah v1.35.1-0.20240412112838-e393e57728f5/go.mod h1:unO5wyQXGHXcDBFu0D+W3bUXvfQrMEh1J6a8dgX8F+4=
+github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce h1:mt7/jkY4a+q8SHLE85v7D4XoWX0KGC3tAfBZ7Mfpqos=
+github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce/go.mod h1:wxQdMk9Wuu178UQLJonrQlBCw940zof77Xm60NmDmlI=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363 h1:EqWMZeFa08y2c1GniaFkfjlO5AjegoG2foWo6NlDfUY=
github.com/containers/gvisor-tap-vsock v0.7.4-0.20240408151405-d744d71db363/go.mod h1:KN4qqZfwVBzvqlN1Ytbhf84sOzftw+R8YL9bixQlr2Y=
-github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA=
-github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
+github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f h1:BpQt4TbrW3L9RNjACQgm7FEi+42u34aMMPopvE/J5Fc=
+github.com/containers/image/v5 v5.30.1-0.20240411200840-dc519780d39f/go.mod h1:2c2hFZQhnz20XxbfHhyoKvxYbnTHCycHnMO3fZSmQY4=
github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c h1:6zalnZZODMOqNZBww9VAM1Mq5EZ3J+S8vYGCo2yg39M=
-github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c/go.mod h1:A/RMGaYhtzfW6L3whYRU+0GGEFocTYyQBqlWSb2UNEM=
+github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 h1:ZyHhVKzwWxHEphK9BFMe8mQU+++IutdHJxJZc5q0wHg=
+github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84/go.mod h1:+idJYfuH2iLhmk8wZEavPACGMRTnZ/NK/5D74U67Um4=
github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA=
-github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
+github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5 h1:owLaLUu/RKf0x62tFm5ZQjU21oRUUIWTRMpZ0zkIt3E=
+github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5/go.mod h1:P4tgJNR/o42wmg+9WZtoJtOJvmZKu2dwzFQggcH9aQw=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
@@ -127,9 +127,9 @@ github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e h1:x5PInTuXLd
github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e/go.mod h1:K4+o74YGNjOb9N6yyG+LPj1NjHtk+Qz0IYQPvirbaLs=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v25.0.5+incompatible h1:3Llw3kcE1gOScEojA247iDD+p1l9hHeC7H3vf3Zd5fk=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
@@ -180,8 +180,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI=
-github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -202,14 +202,14 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw=
-github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA=
-github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
@@ -349,8 +349,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -468,7 +468,6 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
@@ -520,8 +519,8 @@ github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
-github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
-github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
+github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
+github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -554,8 +553,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
-github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
+github.com/sylabs/sif/v2 v2.15.2 h1:UzeG36B+lynOnXBTgGFYxZ/ev0JWB73g8XXX6/1vvx8=
+github.com/sylabs/sif/v2 v2.15.2/go.mod h1:65ua0JOZQ+8Rb9UReICXngJg9UjQBA0+SSxMa67qCPE=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -575,8 +574,8 @@ github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
-github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
-github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0=
@@ -647,8 +646,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
-golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
+golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8=
+golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -657,8 +656,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -682,8 +681,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
+golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -755,7 +754,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
@@ -779,16 +777,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -159,8 +159,8 @@ errmsg "no contents in .*" \
"Error: context must be a directory: .*" \
"bud with specified context should fail if context contains empty Dockerfile"

-errmsg "credential file is not accessible: stat /tmp/nonexistent: no such file or directory" \
-"Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory" \
+errmsg "credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory" \
+"Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory" \
"bud with Containerfile should fail with nonexistent authfile"

errmsg "cannot find Containerfile or Dockerfile" \
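The stat → faccessat change in these expected error messages (and in the similar test updates that follow) lines up with the vendored code further down replacing os.Stat existence checks with containers/storage's fileutils.Exists, which probes the path with faccessat(2). A minimal sketch of the pattern follows; the wrapper name is hypothetical, and the "credential file is not accessible" wrapping actually lives in the authfile-handling code, not here.

```go
package sketch

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/containers/storage/pkg/fileutils"
)

// checkAuthfile is an illustrative helper: fileutils.Exists only reports
// whether the path is accessible, and a missing file now surfaces as
// "faccessat <path>: no such file or directory" rather than "stat <path>: ...".
func checkAuthfile(path string) error {
	if err := fileutils.Exists(path); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("credential file is not accessible: %w", err)
		}
		return err
	}
	return nil
}
```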
@@ -71,7 +71,7 @@ var _ = Describe("Podman healthcheck run", func() {
hc := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.Config.Healthcheck}}", "hc"})
hc.WaitWithDefaultTimeout()
Expect(hc).Should(ExitCleanly())
-Expect(hc.OutputToString()).To(Equal("{[CMD-SHELL curl -f http://localhost/ || exit 1] 0s 5m0s 3s 0}"))
+Expect(hc.OutputToString()).To(Equal("{[CMD-SHELL curl -f http://localhost/ || exit 1] 0s 0s 5m0s 3s 0}"))
})

It("podman disable healthcheck with --health-cmd=none on valid container", func() {
@@ -168,7 +168,7 @@ var _ = Describe("Podman login and logout", func() {
session = podmanTest.Podman([]string{"push", "-q", "--authfile", "/tmp/nonexistent", ALPINE, testImg})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
-Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"))
+Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory"))

session = podmanTest.Podman([]string{"push", "-q", "--authfile", authFile, ALPINE, testImg})
session.WaitWithDefaultTimeout()
@@ -182,8 +182,7 @@ var _ = Describe("Podman login and logout", func() {
session = podmanTest.Podman([]string{"logout", "--authfile", "/tmp/nonexistent", server})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
-Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"))
-
+Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory"))
session = podmanTest.Podman([]string{"logout", "--authfile", authFile, server})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
@@ -206,7 +205,7 @@ var _ = Describe("Podman login and logout", func() {
session = podmanTest.Podman([]string{"logout", "--compat-auth-file", "/tmp/nonexistent", server})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
-Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"))
+Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory"))

// inconsistent command line flags are rejected
// Pre-create the files to make sure we are not hitting the “file not found” path
@@ -172,7 +172,7 @@ var _ = Describe("Podman pull", func() {
session := podmanTest.Podman([]string{"pull", "-q", "--authfile", "/tmp/nonexistent", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
-Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"))
+Expect(session.ErrorToString()).To(Equal("Error: credential file is not accessible: faccessat /tmp/nonexistent: no such file or directory"))
})

It("podman pull by digest (image list)", func() {
@@ -1339,7 +1339,7 @@ search | $IMAGE |
if [[ "$args" = "''" ]]; then args=;fi

run_podman 125 $command --authfile=$bogus $args
-assert "$output" = "Error: credential file is not accessible: stat $bogus: no such file or directory" \
+assert "$output" = "Error: credential file is not accessible: faccessat $bogus: no such file or directory" \
"$command --authfile=nonexistent-path"

if [[ "$command" != "logout" ]]; then
@@ -759,7 +759,8 @@ spec:
bogus=$PODMAN_TMPDIR/bogus-authfile

run_podman 125 kube play --authfile=$bogus - < $PODMAN_TMPDIR/test.yaml
-is "$output" "Error: credential file is not accessible: stat $bogus: no such file or directory" "$command should fail with not such file"
+is "$output" "Error: credential file is not accessible: faccessat $bogus: no such file or directory" \
+"$command should fail with not such file"
}

@test "podman kube play with umask from containers.conf" {
@@ -107,7 +107,7 @@ See 'podman create --help'" "--module must be specified before the command"
# Nonexistent module path with comma
nonesuch=${PODMAN_TMPDIR}/nonexistent,withcomma
run_podman 1 --module=$nonesuch sdfsdfdsf
-is "$output" "Failed to obtain podman configuration: could not resolve module \"$nonesuch\": stat $nonesuch: no such file or directory" \
+is "$output" "Failed to obtain podman configuration: could not resolve module \"$nonesuch\": faccessat $nonesuch: no such file or directory" \
"--module=ENOENT"
}

@@ -188,7 +188,7 @@ EOF
XDG_CONFIG_HOME=$fake_home run_podman 1 --module=$nonesuch invalid-command
expect="Failed to obtain podman configuration: could not resolve module \"$nonesuch\": 3 errors occurred:"
for dir in $fake_home /etc /usr/share;do
-expect+=$'\n\t'"* stat $dir/containers/containers.conf.modules/$nonesuch: no such file or directory"
+expect+=$'\n\t'"* faccessat $dir/containers/containers.conf.modules/$nonesuch: no such file or directory"
done
is "$output" "$expect" "--module=ENOENT : error message"
}
vendor/github.com/containers/buildah/.cirrus.yml (4 changed lines; generated, vendored)
@@ -32,7 +32,7 @@ env:
DEBIAN_NAME: "debian-13"

# Image identifiers
-IMAGE_SUFFIX: "c20240222t143004z-f39f38d13"
+IMAGE_SUFFIX: "c20240320t153921z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -221,7 +221,6 @@ integration_task:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
-CI_DESIRED_RUNTIME: runc
# OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
@@ -235,7 +234,6 @@ integration_task:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
-CI_DESIRED_RUNTIME: runc

gce_instance:
image_name: "$IMAGE_NAME"
vendor/github.com/containers/buildah/Makefile (6 changed lines; generated, vendored)
@@ -16,6 +16,8 @@ BUILDFLAGS := -tags "$(BUILDTAGS)"
BUILDAH := buildah
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
SELINUXTYPE=container_runtime_exec_t
+AS ?= as
+STRIP ?= strip

GO := go
GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
@@ -79,14 +81,14 @@ bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint_amd64.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
test -z "${SELINUXOPT}" || chcon --verbose -t $(SELINUXTYPE) $@

-ifneq ($(shell as --version | grep x86_64),)
+ifneq ($(shell $(AS) --version | grep x86_64),)
internal/mkcw/embed/entrypoint_amd64.gz: internal/mkcw/embed/entrypoint_amd64
gzip -k9nf $^

internal/mkcw/embed/entrypoint_amd64: internal/mkcw/embed/entrypoint_amd64.s
$(AS) -o $(patsubst %.s,%.o,$^) $^
$(LD) -o $@ $(patsubst %.s,%.o,$^)
-strip $@
+$(STRIP) $@
endif
vendor/github.com/containers/buildah/buildah.go (47 changed lines; generated, vendored)
@@ -91,7 +91,7 @@ type Builder struct {
// Logger is the logrus logger to write log messages with
Logger *logrus.Logger `json:"-"`

-// Args define variables that users can pass at build-time to the builder
+// Args define variables that users can pass at build-time to the builder.
Args map[string]string
// Type is used to help identify a build container's metadata. It
// should not be modified.
@@ -118,7 +118,7 @@ type Builder struct {
// MountPoint is the last location where the container's root
// filesystem was mounted. It should not be modified.
MountPoint string `json:"mountpoint,omitempty"`
-// ProcessLabel is the SELinux process label associated with the container
+// ProcessLabel is the SELinux process label to use during subsequent Run() calls.
ProcessLabel string `json:"process-label,omitempty"`
// MountLabel is the SELinux mount label associated with the container
MountLabel string `json:"mount-label,omitempty"`
@@ -139,7 +139,7 @@ type Builder struct {

// Isolation controls how we handle "RUN" statements and the Run() method.
Isolation define.Isolation
-// NamespaceOptions controls how we set up the namespaces for processes that we run in the container.
+// NamespaceOptions controls how we set up the namespaces for processes that we Run().
NamespaceOptions define.NamespaceOptions
// ConfigureNetwork controls whether or not network interfaces and
// routing are configured for a new network namespace (i.e., when not
@@ -157,11 +157,11 @@ type Builder struct {
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
NetworkInterface nettypes.ContainerNetwork `json:"-"`

-// GroupAdd is a list of groups to add to the primary process within
-// the container. 'keep-groups' allows container processes to use
-// supplementary groups.
+// GroupAdd is a list of groups to add to the primary process when Run() is
+// called. The magic 'keep-groups' value indicates that the process should
+// be allowed to inherit the current set of supplementary groups.
GroupAdd []string
-// ID mapping options to use when running processes in the container with non-host user namespaces.
+// ID mapping options to use when running processes with non-host user namespaces.
IDMappingOptions define.IDMappingOptions
// Capabilities is a list of capabilities to use when running commands in the container.
Capabilities []string
@@ -177,14 +177,20 @@ type Builder struct {
CommonBuildOpts *define.CommonBuildOptions
// TopLayer is the top layer of the image
TopLayer string
-// Format for the build Image
+// Format to use for a container image we eventually commit, when we do.
Format string
-// TempVolumes are temporary mount points created during container runs
+// TempVolumes are temporary mount points created during Run() calls.
TempVolumes map[string]bool
-// ContentDigester counts the digest of all Add()ed content
+// ContentDigester counts the digest of all Add()ed content since it was
+// last restarted.
ContentDigester CompositeDigester
-// Devices are the additional devices to add to the containers
+// Devices are parsed additional devices to provide to Run() calls.
Devices define.ContainerDevices
+// DeviceSpecs are unparsed additional devices to provide to Run() calls.
+DeviceSpecs []string
+// CDIConfigDir is the location of CDI configuration files, if the files in
+// the default configuration locations shouldn't be used.
+CDIConfigDir string
}

// BuilderInfo are used as objects to display container information
@@ -215,6 +221,8 @@ type BuilderInfo struct {
IDMappingOptions define.IDMappingOptions
History []v1.History
Devices define.ContainerDevices
+DeviceSpecs []string
+CDIConfigDir string
}

// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
@@ -251,6 +259,8 @@ func GetBuildInfo(b *Builder) BuilderInfo {
Capabilities: b.Capabilities,
History: history,
Devices: b.Devices,
+DeviceSpecs: b.DeviceSpecs,
+CDIConfigDir: b.CDIConfigDir,
}
}

@@ -328,13 +338,15 @@ type BuilderOptions struct {
// ID mapping options to use if we're setting up our own user namespace.
IDMappingOptions *define.IDMappingOptions
// Capabilities is a list of capabilities to use when
-// running commands in the container.
+// running commands for Run().
Capabilities []string
CommonBuildOpts *define.CommonBuildOptions
-// Format for the container image
+// Format to use for a container image we eventually commit, when we do.
Format string
-// Devices are the additional devices to add to the containers
+// Devices are additional parsed devices to provide for Run() calls.
Devices define.ContainerDevices
+// DeviceSpecs are additional unparsed devices to provide for Run() calls.
+DeviceSpecs []string
// DefaultEnv is deprecated and ignored.
DefaultEnv []string
// MaxPullRetries is the maximum number of attempts we'll make to pull
@@ -345,9 +357,9 @@ type BuilderOptions struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
-// ProcessLabel is the SELinux process label associated with the container
+// ProcessLabel is the SELinux process label associated with commands we Run()
ProcessLabel string
-// MountLabel is the SELinux mount label associated with the container
+// MountLabel is the SELinux mount label associated with the working container
MountLabel string
// PreserveBaseImageAnns indicates that we should preserve base
// image information (Annotations) that are present in our base image,
@@ -355,6 +367,9 @@ type BuilderOptions struct {
// itself. Useful as an internal implementation detail of multistage
// builds, and does not need to be set by most callers.
PreserveBaseImageAnns bool
+// CDIConfigDir is the location of CDI configuration files, if the files in
+// the default configuration locations shouldn't be used.
+CDIConfigDir string
}

// ImportOptions are used to initialize a Builder from an existing container
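The new DeviceSpecs and CDIConfigDir fields above let callers hand buildah device strings unparsed and point it at a non-default CDI configuration directory. Below is a minimal, hypothetical sketch of populating them when creating a working container; the image name, device string, and directory are placeholders, and store setup is elided.

```go
package sketch

import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// newWorkingContainer shows only the new fields; all other options are left
// at their defaults. The values here are placeholders, not recommendations.
func newWorkingContainer(ctx context.Context, store storage.Store) (*buildah.Builder, error) {
	opts := buildah.BuilderOptions{
		FromImage:    "docker.io/library/alpine:latest", // placeholder base image
		DeviceSpecs:  []string{"/dev/fuse"},             // unparsed device specs, resolved at Run() time
		CDIConfigDir: "/etc/custom-cdi",                 // placeholder non-default CDI config location
	}
	return buildah.NewBuilder(ctx, store, opts)
}
```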
vendor/github.com/containers/buildah/chroot/run_freebsd.go (15 changed lines; generated, vendored)
@@ -4,8 +4,10 @@
package chroot

import (
+"errors"
"fmt"
"io"
+"io/fs"
"os"
"os/exec"
"path/filepath"
@@ -13,6 +15,7 @@ import (
"syscall"

"github.com/containers/buildah/pkg/jail"
+"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -178,9 +181,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
target := filepath.Join(spec.Root.Path, m.Destination)
-if _, err := os.Stat(target); err != nil {
+if err := fileutils.Exists(target); err != nil {
// If the target can't be stat()ted, check the error.
-if !os.IsNotExist(err) {
+if !errors.Is(err, fs.ErrNotExist) {
return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
}
// The target isn't there yet, so create it, and make a
@@ -211,11 +214,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Do the bind mount.
if !srcinfo.IsDir() {
logrus.Debugf("emulating file mount %q on %q", m.Source, target)
-_, err := os.Stat(target)
+err := fileutils.Exists(target)
if err == nil {
save := saveDir(spec, target)
-if _, err := os.Stat(save); err != nil {
-if os.IsNotExist(err) {
+if err := fileutils.Exists(save); err != nil {
+if errors.Is(err, fs.ErrNotExist) {
err = os.MkdirAll(save, 0111)
}
if err != nil {
@@ -224,7 +227,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
removes = append(removes, save)
}
savePath := filepath.Join(save, filepath.Base(target))
-if _, err := os.Stat(target); err == nil {
+if err := fileutils.Exists(target); err == nil {
logrus.Debugf("moving %q to %q", target, savePath)
if err := os.Rename(target, savePath); err != nil {
return undoBinds, fmt.Errorf("moving %q to %q: %w", target, savePath, err)
vendor/github.com/containers/buildah/common.go (4 changed lines; generated, vendored)
@@ -3,7 +3,6 @@ package buildah
import (
"context"
"io"
-"os"
"path/filepath"
"time"

@@ -15,6 +14,7 @@ import (
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
+"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/unshare"
)

@@ -59,7 +59,7 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
if store != nil {
if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
-if _, err := os.Stat(userRegistriesFile); err == nil {
+if err := fileutils.Exists(userRegistriesFile); err == nil {
sc.SystemRegistriesConfPath = userRegistriesFile
}
}
vendor/github.com/containers/buildah/define/build.go (36 changed lines; generated, vendored)
@@ -59,7 +59,7 @@ type CommonBuildOptions struct {
DNSServers []string
// DNSOptions is the list of DNS
DNSOptions []string
-// LabelOpts is the a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
+// LabelOpts is a slice of the fields of an SELinux context, given in "field:pair" format, or "disable".
// Recognized field names are "role", "type", and "level".
LabelOpts []string
// MemorySwap limits the amount of memory and swap together.
@@ -140,7 +140,8 @@ type BuildOptions struct {
Runtime string
// RuntimeArgs adds global arguments for the runtime.
RuntimeArgs []string
-// TransientMounts is a list of mounts that won't be kept in the image.
+// TransientMounts is a list of unparsed mounts that will be provided to
+// RUN instructions.
TransientMounts []string
// CacheFrom specifies any remote repository which can be treated as
// potential cache source.
@@ -242,22 +243,24 @@ type BuildOptions struct {
CommonBuildOpts *CommonBuildOptions
// CPPFlags are additional arguments to pass to the C Preprocessor (cpp).
CPPFlags []string
-// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+// DefaultMountsFilePath is the file path holding the mounts to be mounted for RUN
+// instructions in "host-path:container-path" format
DefaultMountsFilePath string
// IIDFile tells the builder to write the image ID to the specified file
IIDFile string
-// Squash tells the builder to produce an image with a single layer
-// instead of with possibly more than one layer.
+// Squash tells the builder to produce an image with a single layer instead of with
+// possibly more than one layer, by only committing a new layer after processing the
+// final instruction.
Squash bool
-// Labels metadata for an image
+// Labels to set in a committed image.
Labels []string
// LayerLabels metadata for an intermediate image
LayerLabels []string
-// Annotation metadata for an image
+// Annotations to set in a committed image, in OCI format.
Annotations []string
-// OnBuild commands to be run by images based on this image
+// OnBuild commands to be run by builds that use the image we'll commit as a base image.
OnBuild []string
-// Layers tells the builder to create a cache of images for each step in the Dockerfile
+// Layers tells the builder to commit an image for each step in the Dockerfile.
Layers bool
// NoCache tells the builder to build the image from scratch without checking for a cache.
// It creates a new set of cached images for the build.
@@ -272,7 +275,7 @@ type BuildOptions struct {
BlobDirectory string
// Target the targeted FROM in the Dockerfile to build.
Target string
-// Devices are the additional devices to add to the containers.
+// Devices are unparsed devices to provide to RUN instructions.
Devices []string
// SignBy is the fingerprint of a GPG key to use for signing images.
SignBy string
@@ -298,18 +301,18 @@ type BuildOptions struct {
JobSemaphore *semaphore.Weighted
// LogRusage logs resource usage for each step.
LogRusage bool
-// File to which the Rusage logs will be saved to instead of stdout
+// File to which the Rusage logs will be saved to instead of stdout.
RusageLogFile string
// Excludes is a list of excludes to be used instead of the .dockerignore file.
Excludes []string
// IgnoreFile is a name of the .containerignore file
IgnoreFile string
// From is the image name to use to replace the value specified in the first
-// FROM instruction in the Containerfile
+// FROM instruction in the Containerfile.
From string
-// GroupAdd is a list of groups to add to the primary process within
-// the container. 'keep-groups' allows container processes to use
-// supplementary groups.
+// GroupAdd is a list of groups to add to the primary process when handling RUN
+// instructions. The magic 'keep-groups' value indicates that the process should
+// be allowed to inherit the current set of supplementary groups.
GroupAdd []string
// Platforms is the list of parsed OS/Arch/Variant triples that we want
// to build the image for. If this slice has items in it, the OS and
@@ -336,4 +339,7 @@ type BuildOptions struct {
// SBOMScanOptions encapsulates options which control whether or not we
// run scanners on the rootfs that we're about to commit, and how.
SBOMScanOptions []SBOMScanOptions
+// CDIConfigDir is the location of CDI configuration files, if the files in
+// the default configuration locations shouldn't be used.
+CDIConfigDir string
}
vendor/github.com/containers/buildah/imagebuildah/executor.go (15 changed lines; generated, vendored)
@@ -122,6 +122,7 @@ type Executor struct {
unusedArgs map[string]struct{}
capabilities []string
devices define.ContainerDevices
+deviceSpecs []string
signBy string
architecture string
timestamp *time.Time
@@ -153,6 +154,7 @@ type Executor struct {
envs []string
confidentialWorkload define.ConfidentialWorkloadOptions
sbomScanOptions []define.SBOMScanOptions
+cdiConfigDir string
}

type imageTypeAndHistoryAndDiffIDs struct {
@@ -181,16 +183,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
return nil, err
}

-devices := define.ContainerDevices{}
-for _, device := range append(defaultContainerConfig.Containers.Devices.Get(), options.Devices...) {
-dev, err := parse.DeviceFromPath(device)
-if err != nil {
-return nil, err
-}
-devices = append(dev, devices...)
-}
-var transientMounts []Mount
+transientMounts := []Mount{}

for _, volume := range append(defaultContainerConfig.Volumes(), options.TransientMounts...) {
mount, err := parse.Volume(volume)
if err != nil {
@@ -285,7 +279,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
blobDirectory: options.BlobDirectory,
unusedArgs: make(map[string]struct{}),
capabilities: capabilities,
-devices: devices,
+deviceSpecs: options.Devices,
signBy: options.SignBy,
architecture: options.Architecture,
timestamp: options.Timestamp,
@@ -312,6 +306,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
envs: append([]string{}, options.Envs...),
confidentialWorkload: options.ConfidentialWorkload,
sbomScanOptions: options.SBOMScanOptions,
+cdiConfigDir: options.CDIConfigDir,
}
if exec.err == nil {
exec.err = os.Stderr
vendor/github.com/containers/buildah/imagebuildah/stage_executor.go (19 changed lines; generated, vendored)
@@ -543,6 +543,15 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
}
+if len(copy.Files) > 0 {
+// If we are copying heredoc files, we need to temporary place
+// them in the context dir and then move to container via copier
+// there are cases where .containerignore can have a patterns like
+// '*' which can match our heredoc files so let's not set any excludes
+// or IgnoreFile for this copy.
+options.Excludes = nil
+options.IgnoreFile = ""
+}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
@@ -751,7 +760,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
Env: config.Env,
Hostname: config.Hostname,
Logger: s.executor.logger,
-Mounts: append([]Mount{}, s.executor.transientMounts...),
+Mounts: s.executor.transientMounts,
NamespaceOptions: namespaceOptions,
NoHostname: s.executor.noHostname,
NoHosts: s.executor.noHosts,
@@ -906,6 +915,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
Format: s.executor.outputFormat,
Capabilities: s.executor.capabilities,
Devices: s.executor.devices,
+DeviceSpecs: s.executor.deviceSpecs,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
OciDecryptConfig: s.executor.ociDecryptConfig,
@@ -913,6 +923,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
ProcessLabel: s.executor.processLabel,
MountLabel: s.executor.mountLabel,
PreserveBaseImageAnns: preserveBaseImageAnnotations,
+CDIConfigDir: s.executor.cdiConfigDir,
}

builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
@@ -1400,7 +1411,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}

-needsCacheKey := (len(s.executor.cacheFrom) != 0 || len(s.executor.cacheTo) != 0) && !avoidLookingCache
+needsCacheKey := (len(s.executor.cacheFrom) != 0 && !avoidLookingCache) || len(s.executor.cacheTo) != 0

// If we have to commit for this instruction, only assign the
// stage's configured output name to the last layer.
@@ -1431,7 +1442,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// and copy the content.
canMatchCacheOnlyAfterRun = (step.Command == command.Add || step.Command == command.Copy)
if canMatchCacheOnlyAfterRun {
-s.didExecute = true
if err = ib.Run(step, s, noRunsRemaining); err != nil {
logrus.Debugf("Error building at step %+v: %v", *step, err)
return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
@@ -1468,6 +1478,9 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
}
+if canMatchCacheOnlyAfterRun && cacheID == "" {
+s.didExecute = true
+}
}

// If we didn't find a cache entry, or we need to add content
vendor/github.com/containers/buildah/install.md (4 changed lines; generated, vendored)
@@ -10,7 +10,7 @@
sudo pacman -S buildah
```

-#### [CentOS](https://www.centos.org)
+### [CentOS](https://www.centos.org)

Buildah is available in the default Extras repos for CentOS 7 and in
the AppStream repo for CentOS 8 and Stream, however the available version often
@@ -20,7 +20,7 @@ lags the upstream release.
sudo yum -y install buildah
```

-#### [Debian](https://debian.org)
+### [Debian](https://debian.org)

The buildah package is available in
the [Bookworm](https://packages.debian.org/bookworm/buildah), which
3 vendor/github.com/containers/buildah/internal/mkcw/attest.go generated vendored
@ -15,6 +15,7 @@ import (
"strings"

"github.com/containers/buildah/internal/mkcw/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/sirupsen/logrus"
)

@ -224,7 +225,7 @@ func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string)
}
}
for _, candidate := range pathsToCheck {
if _, err := os.Lstat(candidate); err == nil {
if err := fileutils.Lexists(candidate); err == nil {
var stdout, stderr bytes.Buffer
logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate)
cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate)
4 vendor/github.com/containers/buildah/internal/parse/parse.go generated vendored
@ -2,11 +2,11 @@ package parse

import (
"fmt"
"os"
"path/filepath"
"strings"

"github.com/containers/common/pkg/parse"
"github.com/containers/storage/pkg/fileutils"
specs "github.com/opencontainers/runtime-spec/specs-go"
)

@ -15,7 +15,7 @@ func ValidateVolumeMountHostDir(hostDir string) error {
if !filepath.IsAbs(hostDir) {
return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir)
}
if _, err := os.Stat(hostDir); err != nil {
if err := fileutils.Exists(hostDir); err != nil {
return err
}
return nil
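Most of the hunks in this vendor bump repeat one mechanical substitution: existence checks that used os.Stat or os.Lstat and discarded the FileInfo now call the fileutils helpers from containers/storage, which return only an error. A small self-contained sketch of the new pattern (the path is an arbitrary example):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Old style: discard the FileInfo just to learn whether the path exists.
	//   if _, err := os.Stat("/etc/containers"); err == nil { ... }
	// New style: fileutils.Exists returns only an error (nil means the path
	// exists); fileutils.Lexists is the os.Lstat counterpart.
	if err := fileutils.Exists("/etc/containers"); err == nil {
		fmt.Println("path exists")
	} else if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("path does not exist")
	} else {
		fmt.Println("unexpected error:", err)
	}
}
```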
2 vendor/github.com/containers/buildah/new.go generated vendored
@ -320,8 +320,10 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
Format: options.Format,
TempVolumes: map[string]bool{},
Devices: options.Devices,
DeviceSpecs: options.DeviceSpecs,
Logger: options.Logger,
NetworkInterface: options.NetworkInterface,
CDIConfigDir: options.CDIConfigDir,
}

if options.Mount {
1 vendor/github.com/containers/buildah/pkg/cli/build.go generated vendored
@ -358,6 +358,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheTTL: cacheTTL,
CDIConfigDir: iopts.CDIConfigDir,
CNIConfigDir: iopts.CNIConfigDir,
CNIPluginPath: iopts.CNIPlugInPath,
ConfidentialWorkload: confidentialWorkloadOptions,
5 vendor/github.com/containers/buildah/pkg/cli/common.go generated vendored
@ -128,6 +128,7 @@ type FromAndBudResults struct {
BlobCache string
CapAdd []string
CapDrop []string
CDIConfigDir string
CgroupParent string
CPUPeriod uint64
CPUQuota int64
@ -377,6 +378,8 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
}
fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
fs.StringVar(&flags.CDIConfigDir, "cdi-config-dir", "", "`directory` of CDI configuration files")
_ = fs.MarkHidden("cdi-config-dir")
fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
@ -384,7 +387,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices.Get(), "additional devices to be used within containers (default [])")
fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices.Get(), "additional devices to provide")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches.Get(), "set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers.Get(), "set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions.Get(), "set custom DNS options")
20 vendor/github.com/containers/buildah/pkg/parse/parse.go generated vendored
@ -7,6 +7,7 @@ package parse
import (
"errors"
"fmt"
"io/fs"
"net"
"os"
"path/filepath"
@ -26,6 +27,7 @@ import (
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
storageTypes "github.com/containers/storage/types"
@ -252,14 +254,14 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
}

if commonOpts.SeccompProfilePath == "" {
if _, err := os.Stat(SeccompOverridePath); err == nil {
if err := fileutils.Exists(SeccompOverridePath); err == nil {
commonOpts.SeccompProfilePath = SeccompOverridePath
} else {
if !errors.Is(err, os.ErrNotExist) {
if !errors.Is(err, fs.ErrNotExist) {
return err
}
if _, err := os.Stat(SeccompDefaultPath); err != nil {
if !errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists(SeccompDefaultPath); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return err
}
} else {
@ -982,7 +984,7 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl
usernsOption.Host = true
default:
how = strings.TrimPrefix(how, "ns:")
if _, err := os.Stat(how); err != nil {
if err := fileutils.Exists(how); err != nil {
return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err)
}
logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
@ -1077,7 +1079,7 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st
how = strings.TrimPrefix(how, "ns:")
// if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go
if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
if _, err := os.Stat(how); err != nil {
if err := fileutils.Exists(how); err != nil {
return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err)
}
}
@ -1243,7 +1245,7 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
if err != nil {
return nil, fmt.Errorf("could not parse secrets: %w", err)
}
_, err = os.Stat(fullPath)
err = fileutils.Exists(fullPath)
if err != nil {
return nil, fmt.Errorf("could not parse secrets: %w", err)
}
@ -1293,10 +1295,10 @@ func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]st
containerfile = filepath.Join(contextDir, containerfile)
}
containerfileIgnore := ""
if _, err := os.Stat(containerfile + ".containerignore"); err == nil {
if err := fileutils.Exists(containerfile + ".containerignore"); err == nil {
containerfileIgnore = containerfile + ".containerignore"
}
if _, err := os.Stat(containerfile + ".dockerignore"); err == nil {
if err := fileutils.Exists(containerfile + ".dockerignore"); err == nil {
containerfileIgnore = containerfile + ".dockerignore"
}
if containerfileIgnore != "" {
7 vendor/github.com/containers/buildah/pkg/parse/parse_unix.go generated vendored
@ -18,6 +18,13 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
if err != nil {
return nil, err
}
if linkTarget, err := os.Readlink(src); err == nil {
if filepath.IsAbs(linkTarget) {
src = linkTarget
} else {
src = filepath.Join(filepath.Dir(src), linkTarget)
}
}
srcInfo, err := os.Stat(src)
if err != nil {
return nil, fmt.Errorf("getting info of source device %s: %w", src, err)
14 vendor/github.com/containers/buildah/run.go generated vendored
@ -147,14 +147,15 @@ type RunOptions struct {
// after processing the AddCapabilities set. If a capability appears in both
// lists, it will be dropped.
DropCapabilities []string
// Devices are the additional devices to add to the containers
// Devices are parsed additional devices to add
Devices define.ContainerDevices
// Secrets are the available secrets to use in a RUN
// DeviceSpecs are unparsed additional devices to add
DeviceSpecs []string
// Secrets are the available secrets to use
Secrets map[string]define.Secret
// SSHSources is the available ssh agents to use in a RUN
// SSHSources is the available ssh agents to use
SSHSources map[string]*sshagent.Source `json:"-"`
// RunMounts are mounts for this run. RunMounts for this run
// will not show up in subsequent runs.
// RunMounts are unparsed mounts to be added for this run
RunMounts []string
// Map of stages and container mountpoint if any from stage executor
StageMountPoints map[string]internal.StageMountDetails
@ -166,6 +167,9 @@ type RunOptions struct {
SystemContext *types.SystemContext
// CgroupManager to use for running OCI containers
CgroupManager string
// CDIConfigDir is the location of CDI configuration files, if the files in
// the default configuration locations shouldn't be used.
CDIConfigDir string
}

// RunMountArtifacts are the artifacts created when using a run mount.
6 vendor/github.com/containers/buildah/run_common.go generated vendored
@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
@ -40,6 +41,7 @@ import (
"github.com/containers/common/pkg/subscriptions"
imageTypes "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
@ -1414,8 +1416,8 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
// If we need to, create the directory that we'll use to hold
// the volume contents. If we do need to create it, then we'll
// need to populate it, too, so make a note of that.
if _, err := os.Stat(volumePath); err != nil {
if !errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists(volumePath); err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, err
}
logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
106 vendor/github.com/containers/buildah/run_linux.go generated vendored
@ -34,6 +34,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/hooks"
hooksExec "github.com/containers/common/pkg/hooks/exec"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
@ -45,11 +46,9 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"golang.org/x/sys/unix"
"tags.cncf.io/container-device-interface/pkg/cdi"
)

// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
type ContainerDevices define.ContainerDevices

var (
// We dont want to remove destinations with /etc, /dev, /sys,
// /proc as rootfs already contains these files and unionfs
@ -69,6 +68,81 @@ func setChildProcess() error {
return nil
}

func (b *Builder) cdiSetupDevicesInSpec(deviceSpecs []string, configDir string, spec *specs.Spec) ([]string, error) {
leftoverDevices := deviceSpecs
registry, err := cdi.NewCache()
if err != nil {
return nil, fmt.Errorf("creating CDI registry: %w", err)
}
var configDirs []string
if b.CDIConfigDir != "" {
configDirs = append(configDirs, b.CDIConfigDir)
}
if configDir != "" {
configDirs = append(configDirs, configDir)
}
// TODO: CdiSpecDirs will be in containers/common v0.59.0 or later?
// defConfig, err := config.Default()
// if err != nil {
// return nil, fmt.Errorf("failed to get container config: %w", err)
// }
// configDirs = append(configDirs, defConfig.Engine.CdiSpecDirs.Get()...)
if len(configDirs) > 0 {
if err := registry.Configure(cdi.WithSpecDirs(configDirs...)); err != nil {
return nil, fmt.Errorf("CDI registry ignored configured directories %v: %w", configDirs, err)
}
}
if err := registry.Refresh(); err != nil {
logrus.Warnf("CDI registry refresh: %v", err)
} else {
leftoverDevices, err = registry.InjectDevices(spec, deviceSpecs...)
if err != nil {
logrus.Debugf("CDI device injection: %v, unresolved list %v", err, leftoverDevices)
}
}
removed := slices.DeleteFunc(slices.Clone(deviceSpecs), func(t string) bool { return slices.Contains(leftoverDevices, t) })
logrus.Debugf("CDI taking care of devices %v, leaving devices %v", removed, leftoverDevices)
return leftoverDevices, nil
}

// Extract the device list so that we can still try to make it work if
// we're running rootless and can't just mknod() the device nodes.
func separateDevicesFromRuntimeSpec(g *generate.Generator) define.ContainerDevices {
var result define.ContainerDevices
if g.Config != nil && g.Config.Linux != nil {
for _, device := range g.Config.Linux.Devices {
var bDevice define.BuildahDevice
bDevice.Path = device.Path
switch device.Type {
case "b":
bDevice.Type = 'b'
case "c":
bDevice.Type = 'c'
case "u":
bDevice.Type = 'u'
case "p":
bDevice.Type = 'p'
}
bDevice.Major = device.Major
bDevice.Minor = device.Minor
if device.FileMode != nil {
bDevice.FileMode = *device.FileMode
}
if device.UID != nil {
bDevice.Uid = *device.UID
}
if device.GID != nil {
bDevice.Gid = *device.GID
}
bDevice.Source = device.Path
bDevice.Destination = device.Path
result = append(result, bDevice)
}
}
g.ClearLinuxDevices()
return result
}

// Run runs the specified command in the container's root filesystem.
func (b *Builder) Run(command []string, options RunOptions) error {
p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
@ -147,8 +221,24 @@ func (b *Builder) Run(command []string, options RunOptions) error {
g.SetProcessArgs(nil)
}

// Mount devices if any and if session is rootless attempt a bind-mount
// just like podman.
// Combine the working container's set of devices with the ones for just this run.
deviceSpecs := append(append([]string{}, options.DeviceSpecs...), b.DeviceSpecs...)
deviceSpecs, err = b.cdiSetupDevicesInSpec(deviceSpecs, options.CDIConfigDir, g.Config) // makes changes to more than just the device list
if err != nil {
return err
}
devices := separateDevicesFromRuntimeSpec(g)
for _, deviceSpec := range deviceSpecs {
device, err := parse.DeviceFromPath(deviceSpec)
if err != nil {
return fmt.Errorf("setting up device %q: %w", deviceSpec, err)
}
devices = append(devices, device...)
}
devices = append(append(devices, options.Devices...), b.Devices...)

// Mount devices, if any, and if we're rootless attempt to work around not
// being able to create device nodes by bind-mounting them from the host, like podman does.
if unshare.IsRootless() {
// We are going to create bind mounts for devices
// but we need to make sure that we don't override
@ -158,7 +248,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
mounts[m.Destination] = true
}
newMounts := []specs.Mount{}
for _, d := range b.Devices {
for _, d := range devices {
// Default permission is read-only.
perm := "ro"
// Get permission configured for this device but only process `write`
@ -184,7 +274,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
g.Config.Mounts = append(newMounts, g.Config.Mounts...)
} else {
for _, d := range b.Devices {
for _, d := range devices {
sDev := specs.LinuxDevice{
Type: string(d.Type),
Path: d.Path,
@ -1242,7 +1332,7 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou
// if userns and host ipc bind mount shm
if isUserns && !isIpcns {
// bind mount /dev/shm when it exists
if _, err := os.Stat("/dev/shm"); err == nil {
if err := fileutils.Exists("/dev/shm"); err == nil {
shmMount := specs.Mount{
Source: "/dev/shm",
Type: "bind",
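The new cdiSetupDevicesInSpec helper above resolves CDI device names by loading spec files into a registry cache and injecting the matching devices into the OCI runtime spec; anything the registry cannot resolve is handed back so buildah can fall back to treating it as a plain /dev path. A stripped-down sketch of that flow, using the same tags.cncf.io/container-device-interface/pkg/cdi calls as the hunk; the spec directory and device name are placeholders:

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"tags.cncf.io/container-device-interface/pkg/cdi"
)

func main() {
	spec := &specs.Spec{} // OCI spec that will eventually be handed to the runtime

	registry, err := cdi.NewCache()
	if err != nil {
		panic(err)
	}
	// Point the cache at a CDI spec directory ("/etc/cdi" is a placeholder for
	// the CDIConfigDir plumbed through the options above).
	if err := registry.Configure(cdi.WithSpecDirs("/etc/cdi")); err != nil {
		panic(err)
	}
	if err := registry.Refresh(); err != nil {
		fmt.Println("CDI registry refresh:", err)
	}

	// Unresolved names come back in leftover so the caller can treat them as
	// ordinary device paths instead of CDI identifiers.
	leftover, err := registry.InjectDevices(spec, "vendor.example.com/device=gpu0")
	fmt.Println("unresolved devices:", leftover, "err:", err)
}
```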
4 vendor/github.com/containers/common/libimage/load.go generated vendored
@ -6,7 +6,6 @@ import (
"context"
"errors"
"fmt"
"os"
"time"

dirTransport "github.com/containers/image/v5/directory"
@ -15,6 +14,7 @@ import (
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/sirupsen/logrus"
)

@ -141,7 +141,7 @@ func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.Ima
// syntax to reference an image within the archive was used, so we
// should.
path := ref.StringWithinTransport()
if _, err := os.Stat(path); err != nil {
if err := fileutils.Exists(path); err != nil {
return r.copyFromDockerArchive(ctx, ref, options)
}
3 vendor/github.com/containers/common/libimage/manifests/manifests.go generated vendored
@ -32,6 +32,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
digest "github.com/opencontainers/go-digest"
@ -330,7 +331,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
return nil, fmt.Errorf(`internal error: no file or blob with artifact "config" or "layer" digest %q recorded`, referencedBlobDigest)
}
expectedLayerBlobPath := filepath.Join(blobsDir, referencedBlobDigest.Encoded())
if _, err := os.Lstat(expectedLayerBlobPath); err == nil {
if err := fileutils.Lexists(expectedLayerBlobPath); err == nil {
// did this one already
continue
} else if knownFile {
4 vendor/github.com/containers/common/libnetwork/cni/cni_types.go generated vendored
@ -4,10 +4,10 @@ package cni

import (
"net"
"os"
"path/filepath"

"github.com/containers/common/libnetwork/types"
"github.com/containers/storage/pkg/fileutils"
)

const (
@ -250,7 +250,7 @@ func newDNSNamePlugin(domainName string) dnsNameConfig {
// hasDNSNamePlugin looks to see if the dnsname cni plugin is present
func hasDNSNamePlugin(paths []string) bool {
for _, p := range paths {
if _, err := os.Stat(filepath.Join(p, "dnsname")); err == nil {
if err := fileutils.Exists(filepath.Join(p, "dnsname")); err == nil {
return true
}
}
3 vendor/github.com/containers/common/libnetwork/cni/network.go generated vendored
@ -19,6 +19,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/version"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
@ -331,7 +332,7 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo {
if err != nil {
logrus.Infof("Failed to get the dnsname plugin version: %v", err)
}
if _, err := os.Stat(dnsPath); err == nil {
if err := fileutils.Exists(dnsPath); err == nil {
info.DNS = types.DNSNetworkInfo{
Path: dnsPath,
Package: dnsPackage,
7 vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go generated vendored
@ -16,6 +16,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/netns"
"github.com/containers/common/pkg/systemd"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/hashicorp/go-multierror"
@ -154,7 +155,7 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) {
}

func (n *Netns) cleanup() error {
if _, err := os.Stat(n.dir); err != nil {
if err := fileutils.Exists(n.dir); err != nil {
if errors.Is(err, fs.ErrNotExist) {
// dir does not exists no need for cleanup
return nil
@ -337,7 +338,7 @@ func (n *Netns) setupMounts() error {
// 2. Also keep /run/systemd if it exists.
// Many files are symlinked into this dir, for example /dev/log.
runSystemd := "/run/systemd"
_, err = os.Stat(runSystemd)
err = fileutils.Exists(runSystemd)
if err == nil {
newRunSystemd := n.getPath(runSystemd)
err = mountAndMkdirDest(runSystemd, newRunSystemd, none, unix.MS_BIND|unix.MS_REC)
@ -476,7 +477,7 @@ func (n *Netns) mountCNIVarDir() error {
// while we could always use /var there are cases where a user might store the cni
// configs under /var/custom and this would break
for {
if _, err := os.Stat(varTarget); err == nil {
if err := fileutils.Exists(varTarget); err == nil {
varDir = n.getPath(varTarget)
break
}
5 vendor/github.com/containers/common/libnetwork/netavark/config.go generated vendored
@ -376,6 +376,11 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error {
return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
}

// remove the ipam bucket for this network
if err := n.removeNetworkIPAMBucket(network); err != nil {
return err
}

file := filepath.Join(n.networkConfigDir, network.Name+".json")
// make sure to not error for ErrNotExist
if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
21 vendor/github.com/containers/common/libnetwork/netavark/ipam.go generated vendored
@ -4,6 +4,7 @@ package netavark

import (
"encoding/json"
"errors"
"fmt"
"net"

@ -357,6 +358,26 @@ func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error {
return err
}

func (n *netavarkNetwork) removeNetworkIPAMBucket(network *types.Network) error {
if !requiresIPAMAlloc(network) {
return nil
}
db, err := n.openDB()
if err != nil {
return err
}
defer db.Close()

return db.Update(func(tx *bbolt.Tx) error {
// Ignore ErrBucketNotFound, can happen if the network never allocated any ips,
// i.e. because no container was started.
if err := tx.DeleteBucket([]byte(network.Name)); err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
return err
}
return nil
})
}

// requiresIPAMAlloc return true when we have to allocate ips for this network
// it checks the ipam driver and if subnets are set
func requiresIPAMAlloc(network *types.Network) bool {
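The removeNetworkIPAMBucket helper added above drops the per-network bucket from the bbolt-backed IPAM store and treats a missing bucket as success. A standalone sketch of that bbolt pattern, with a throwaway database path and network name as placeholders:

```go
package main

import (
	"errors"
	"log"

	bbolt "go.etcd.io/bbolt"
)

func main() {
	// Placeholder database path; netavark's real IPAM db lives elsewhere.
	db, err := bbolt.Open("/tmp/ipam-example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Deleting a bucket that was never created returns ErrBucketNotFound;
	// treating that as success mirrors the hunk above, where a network that
	// never allocated any IPs simply has no bucket to remove.
	err = db.Update(func(tx *bbolt.Tx) error {
		if err := tx.DeleteBucket([]byte("mynet")); err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
			return err
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```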
3 vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go generated vendored
@ -7,6 +7,7 @@ import (
"path/filepath"
"strings"

"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
@ -61,7 +62,7 @@ func getDefaultResolvConf(params *Params) ([]byte, bool, error) {
if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
// check for netns created by "ip netns"
path := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
resolveConf = path
}
3 vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go generated vendored
@ -16,6 +16,7 @@ import (
"text/template"

"github.com/containers/common/pkg/apparmor/internal/supported"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/unshare"
runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/sirupsen/logrus"
@ -75,7 +76,7 @@ func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer)

// macrosExists checks if the passed macro exists.
func macroExists(m string) bool {
_, err := os.Stat(path.Join(profileDirectory, m))
err := fileutils.Exists(path.Join(profileDirectory, m))
return err == nil
}
3 vendor/github.com/containers/common/pkg/auth/auth.go generated vendored
@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
)
@ -69,7 +70,7 @@ func CheckAuthFile(pathOption string) error {
if pathOption == "" {
return nil
}
if _, err := os.Stat(pathOption); err != nil {
if err := fileutils.Exists(pathOption); err != nil {
return fmt.Errorf("credential file is not accessible: %w", err)
}
return nil
5 vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go generated vendored
@ -17,6 +17,7 @@ import (
"syscall"
"time"

"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/unshare"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/godbus/dbus/v5"
@ -367,7 +368,7 @@ func Load(path string) (*CgroupControl, error) {
// check that the cgroup exists at least under one controller
for name := range handlers {
p := control.getCgroupv1Path(name)
if _, err := os.Stat(p); err == nil {
if err := fileutils.Exists(p); err == nil {
oneExists = true
break
}
@ -575,7 +576,7 @@ func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, e

func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) {
cPath := c.getCgroupv1Path(controller)
_, err := os.Stat(cPath)
err := fileutils.Exists(cPath)
if err == nil {
return false, nil
}
3 vendor/github.com/containers/common/pkg/cgroups/utils_linux.go generated vendored
@ -13,6 +13,7 @@ import (
"strings"
"sync"

"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
@ -235,7 +236,7 @@ func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error {
cgroupRoot = filepath.Join(cgroupRoot, "unified")

// Ignore the unified mount if it doesn't exist
if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) {
if err := fileutils.Exists(cgroupRoot); err != nil && os.IsNotExist(err) {
continue
}
} else if parts[1] != "" {
7 vendor/github.com/containers/common/pkg/config/config.go generated vendored
@ -12,6 +12,7 @@ import (
"github.com/containers/common/internal/attributedstring"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
selinux "github.com/opencontainers/selinux/go-selinux"
@ -715,7 +716,7 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
if hasSession {
for _, part := range strings.Split(session, ",") {
if strings.HasPrefix(part, "unix:path=") {
_, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
err := fileutils.Exists(strings.TrimPrefix(part, "unix:path="))
hasSession = err == nil
break
}
@ -780,7 +781,7 @@ func (c *EngineConfig) findRuntime() string {
// Search for crun first followed by runc, runj, kata, runsc, ocijail
for _, name := range []string{"crun", "runc", "runj", "kata", "runsc", "ocijail"} {
for _, v := range c.OCIRuntimes[name] {
if _, err := os.Stat(v); err == nil {
if err := fileutils.Exists(v); err == nil {
return name
}
}
@ -1189,7 +1190,7 @@ func (c *Config) FindInitBinary() (string, error) {
return c.Engine.InitPath, nil
}
// keep old default working to guarantee backwards compat
if _, err := os.Stat(DefaultInitPath); err == nil {
if err := fileutils.Exists(DefaultInitPath); err == nil {
return DefaultInitPath, nil
}
return c.FindHelperBinary(defaultInitName, true)
3 vendor/github.com/containers/common/pkg/config/config_local.go generated vendored
@ -9,6 +9,7 @@ import (
"regexp"
"strings"

"github.com/containers/storage/pkg/fileutils"
units "github.com/docker/go-units"
"tags.cncf.io/container-device-interface/pkg/parser"
)
@ -83,7 +84,7 @@ func (c *ContainersConfig) validateTZ() error {

for _, paths := range lookupPaths {
zonePath := filepath.Join(paths, c.TZ)
if _, err := os.Stat(zonePath); err == nil {
if err := fileutils.Exists(zonePath); err == nil {
// found zone information
return nil
}
5 vendor/github.com/containers/common/pkg/config/default.go generated vendored
@ -13,6 +13,7 @@ import (
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/cgroupv2"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
@ -206,8 +207,8 @@ func defaultConfig() (*Config, error) {
}
sigPath := filepath.Join(configHome, DefaultRootlessSignaturePolicyPath)
defaultEngineConfig.SignaturePolicyPath = sigPath
if _, err := os.Stat(sigPath); err != nil {
if _, err := os.Stat(DefaultSignaturePolicyPath); err == nil {
if err := fileutils.Exists(sigPath); err != nil {
if err := fileutils.Exists(DefaultSignaturePolicyPath); err == nil {
defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath
}
}
6 vendor/github.com/containers/common/pkg/config/modules.go generated vendored
@ -2,9 +2,9 @@ package config

import (
"fmt"
"os"
"path/filepath"

"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/unshare"
"github.com/hashicorp/go-multierror"
@ -76,7 +76,7 @@ func ModuleDirectories() ([]string, error) { // Public API for shell completions
// Resolve the specified path to a module.
func resolveModule(path string, dirs []string) (string, error) {
if filepath.IsAbs(path) {
_, err := os.Stat(path)
err := fileutils.Exists(path)
return path, err
}

@ -85,7 +85,7 @@ func resolveModule(path string, dirs []string) (string, error) {
var multiErr error
for _, d := range dirs {
candidate := filepath.Join(d, path)
_, err := os.Stat(candidate)
err := fileutils.Exists(candidate)
if err == nil {
return candidate, nil
}
5 vendor/github.com/containers/common/pkg/config/new.go generated vendored
@ -11,6 +11,7 @@ import (
"sync"

"github.com/BurntSushi/toml"
"github.com/containers/storage/pkg/fileutils"
"github.com/sirupsen/logrus"
)

@ -101,7 +102,7 @@ func newLocked(options *Options) (*Config, error) {
// The _OVERRIDE variable _must_ always win. That's a contract we need
// to honor (for the Podman CI).
if path := os.Getenv(containersConfOverrideEnv); path != "" {
if _, err := os.Stat(path); err != nil {
if err := fileutils.Exists(path); err != nil {
return nil, fmt.Errorf("%s file: %w", containersConfOverrideEnv, err)
}
options.additionalConfigs = append(options.additionalConfigs, path)
@ -152,7 +153,7 @@ func NewConfig(userConfigPath string) (*Config, error) {
// file settings.
func systemConfigs() (configs []string, finalErr error) {
if path := os.Getenv(containersConfEnv); path != "" {
if _, err := os.Stat(path); err != nil {
if err := fileutils.Exists(path); err != nil {
return nil, fmt.Errorf("%s file: %w", containersConfEnv, err)
}
return append(configs, path), nil
4 vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go generated vendored
@ -5,9 +5,9 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"regexp"

"github.com/containers/storage/pkg/fileutils"
rspec "github.com/opencontainers/runtime-spec/specs-go"
)

@ -44,7 +44,7 @@ func (hook *Hook) Validate(extensionStages []string) (err error) {
return errors.New("missing required property: hook.path")
}

if _, err := os.Stat(hook.Hook.Path); err != nil {
if err := fileutils.Exists(hook.Hook.Path); err != nil {
return err
}
5 vendor/github.com/containers/common/pkg/parse/parse.go generated vendored
@ -6,10 +6,11 @@ package parse
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"strings"

"github.com/containers/storage/pkg/fileutils"
)

// ValidateVolumeOpts validates a volume's options
@ -175,7 +176,7 @@ func ValidateVolumeHostDir(hostDir string) error {
return errors.New("host directory cannot be empty")
}
if filepath.IsAbs(hostDir) {
if _, err := os.Stat(hostDir); err != nil {
if err := fileutils.Exists(hostDir); err != nil {
return err
}
}
3 vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go generated vendored
@ -9,6 +9,7 @@ import (
"path/filepath"
"sort"

"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/lockfile"
"golang.org/x/exp/maps"
)
@ -128,7 +129,7 @@ func (d *Driver) Delete(id string) error {
// getAllData reads the data file and returns all data
func (d *Driver) getAllData() (map[string][]byte, error) {
// check if the db file exists
_, err := os.Stat(d.secretsDataFilePath)
err := fileutils.Exists(d.secretsDataFilePath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// the file will be created later on a store()
4 vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go generated vendored
@ -11,6 +11,8 @@ import (
"path/filepath"
"sort"
"strings"

"github.com/containers/storage/pkg/fileutils"
)

var (
@ -74,7 +76,7 @@ func defaultDriverConfig() *driverConfig {
func (cfg *driverConfig) findGpgID() {
path := cfg.Root
for len(path) > 1 {
if _, err := os.Stat(filepath.Join(path, ".gpg-id")); err == nil {
if err := fileutils.Exists(filepath.Join(path, ".gpg-id")); err == nil {
bs, err := os.ReadFile(filepath.Join(path, ".gpg-id"))
if err != nil {
continue
3 vendor/github.com/containers/common/pkg/ssh/connection_golang.go generated vendored
@ -17,6 +17,7 @@ import (
"time"

"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/pkg/sftp"
"github.com/sirupsen/logrus"
@ -312,7 +313,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection
return err
}
keyDir := path.Dir(keyFilePath)
if _, err := os.Stat(keyDir); errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists(keyDir); errors.Is(err, os.ErrNotExist) {
if err := os.Mkdir(keyDir, 0o700); err != nil {
return err
}
13 vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go generated vendored
@ -9,6 +9,7 @@ import (
"strings"

"github.com/containers/common/pkg/umask"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
@ -182,7 +183,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string,
mountFiles = append(mountFiles, mountFile)
}
for _, file := range mountFiles {
if _, err := os.Stat(file); err == nil {
if err := fileutils.Exists(file); err == nil {
mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid)
if err != nil {
logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err)
@ -197,7 +198,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string,
return subscriptionMounts
}
// Add FIPS mode subscription if /etc/system-fips exists on the host
_, err := os.Stat("/etc/system-fips")
err := fileutils.Exists("/etc/system-fips")
switch {
case err == nil:
if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil {
@ -240,7 +241,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile)

// In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost
_, err = os.Stat(ctrDirOrFileOnHost)
err = fileutils.Exists(ctrDirOrFileOnHost)
if errors.Is(err, os.ErrNotExist) {
hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile)
if err != nil {
@ -315,7 +316,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error {
subscriptionsDir := "/run/secrets"
ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
if _, err := os.Stat(ctrDirOnHost); errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists(ctrDirOnHost); errors.Is(err, os.ErrNotExist) {
if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint
return err
}
@ -325,7 +326,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint,
}
fipsFile := filepath.Join(ctrDirOnHost, "system-fips")
// In the event of restart, it is possible for the FIPS mode file to already exist
if _, err := os.Stat(fipsFile); errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists(fipsFile); errors.Is(err, os.ErrNotExist) {
file, err := os.Create(fipsFile)
if err != nil {
return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err)
@ -346,7 +347,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint,
srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS"
destDir := "/etc/crypto-policies/back-ends"
srcOnHost := filepath.Join(mountPoint, srcBackendDir)
if _, err := os.Stat(srcOnHost); err != nil {
if err := fileutils.Exists(srcOnHost); err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
5 vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go generated vendored
@ -8,6 +8,7 @@ import (
"strings"

"github.com/containers/common/pkg/cgroupv2"
"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@ -51,7 +52,7 @@ func New(quiet bool) *SysInfo {
sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables")

// Check if AppArmor is supported.
if _, err := os.Stat("/sys/kernel/security/apparmor"); !errors.Is(err, os.ErrNotExist) {
if err := fileutils.Exists("/sys/kernel/security/apparmor"); !errors.Is(err, os.ErrNotExist) {
sysInfo.AppArmor = true
}

@ -249,7 +250,7 @@ func checkCgroupPids(cgMounts map[string]string, quiet bool) cgroupPids {
}

func cgroupEnabled(mountPoint, name string) bool {
_, err := os.Stat(path.Join(mountPoint, name))
err := fileutils.Exists(path.Join(mountPoint, name))
return err == nil
}
3 vendor/github.com/containers/common/pkg/timezone/timezone.go generated vendored
@ -10,6 +10,7 @@ import (
"os"
"path/filepath"

"github.com/containers/storage/pkg/fileutils"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@ -54,7 +55,7 @@ func ConfigureContainerTimeZone(timezone, containerRunDir, mountPoint, etcPath,
}

var localtimePath string
if _, err := os.Stat(hostPath); err != nil {
if err := fileutils.Exists(hostPath); err != nil {
// File does not exist, which means tzdata is not installed in the container.
// Create /etc/localtime as a copy from the host.
logrus.Debugf("Timezone %s does not exist in the container, create our own copy from the host", timezonePath)
4 vendor/github.com/containers/common/pkg/umask/umask.go generated vendored
@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path/filepath"

"github.com/containers/storage/pkg/fileutils"
)

// MkdirAllIgnoreUmask creates a directory by ignoring the currently set umask.
@ -13,7 +15,7 @@ func MkdirAllIgnoreUmask(dir string, mode os.FileMode) error {

// Find all parent directories which would have been created by MkdirAll
for {
if _, err := os.Stat(parent); err == nil {
if err := fileutils.Exists(parent); err == nil {
break
} else if !os.IsNotExist(err) {
return fmt.Errorf("cannot stat %s: %w", dir, err)
5 vendor/github.com/containers/common/pkg/util/util.go generated vendored
@ -7,6 +7,7 @@ import (
"regexp"
"time"

"github.com/containers/storage/pkg/fileutils"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
@ -56,7 +57,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
case e := <-chWait:
return true, e
case <-inotifyEvents:
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
return false, nil
}
@ -68,7 +69,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
// if the inotify watcher could not have been created. It is
// also useful when using inotify as if for any reasons we missed
// a notification, we won't hang the process.
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
return false, nil
}
7 vendor/github.com/containers/common/pkg/version/version.go generated vendored
@ -3,9 +3,10 @@ package version
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"

"github.com/containers/storage/pkg/fileutils"
)

const (
@ -15,7 +16,7 @@ const (
// Note: This function is copied from containers/podman libpod/util.go
// Please see https://github.com/containers/common/pull/1460
func queryPackageVersion(cmdArg ...string) string {
_, err := os.Stat(cmdArg[0])
err := fileutils.Exists(cmdArg[0])
if err != nil {
return ""
}
@ -63,7 +64,7 @@ func queryPackageVersion(cmdArg ...string) string {
// Note: This function is copied from containers/podman libpod/util.go
// Please see https://github.com/containers/common/pull/1460
func Package(program string) string { // program is full path
_, err := os.Stat(program)
err := fileutils.Exists(program)
if err != nil {
return UnknownPackage
}
2 vendor/github.com/containers/image/v5/copy/compression.go generated vendored
@ -287,7 +287,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
maps.Copy(*annotations, d.uploadedAnnotations)
}

// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// and the original srcInfo (which the caller guarantees has been validated).
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
3 vendor/github.com/containers/image/v5/directory/directory_dest.go generated vendored
@ -15,6 +15,7 @@ import (
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -249,7 +250,7 @@ func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error

// returns true if path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
return true, nil
}
4 vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go generated vendored
@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path/filepath"

"github.com/containers/storage/pkg/fileutils"
)

// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@ -11,7 +13,7 @@ import (
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
switch _, err := os.Lstat(path); {
switch err := fileutils.Lexists(path); {
case err == nil:
return resolveExistingPathToFullyExplicit(path)
case os.IsNotExist(err):
36
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
36
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/iolimits"
|
||||
"github.com/containers/image/v5/internal/multierr"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/internal/useragent"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
@ -25,6 +26,7 @@ import (
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
@ -186,7 +188,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
|
||||
}
|
||||
|
||||
fullCertDirPath = filepath.Join(hostCertDir, hostPort)
|
||||
_, err := os.Stat(fullCertDirPath)
|
||||
err := fileutils.Exists(fullCertDirPath)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@ -497,8 +499,8 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
|
||||
// Checks if the auth headers in the response contain an indication of a failed
|
||||
// authorizdation because of an "insufficient_scope" error. If that's the case,
|
||||
// returns the required scope to be used for fetching a new token.
|
||||
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
|
||||
if err == nil && res.StatusCode == http.StatusUnauthorized {
|
||||
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
|
||||
if res.StatusCode == http.StatusUnauthorized {
|
||||
challenges := parseAuthHeader(res.Header)
|
||||
for _, challenge := range challenges {
|
||||
if challenge.Scheme == "bearer" {
|
||||
@ -557,6 +559,9 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
|
||||
attempts := 0
|
||||
for {
|
||||
res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attempts++
|
||||
|
||||
// By default we use pre-defined scopes per operation. In
|
||||
@ -572,19 +577,24 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
|
||||
// We also cannot retry with a body (stream != nil) as stream
|
||||
// was already read
|
||||
if attempts == 1 && stream == nil && auth != noAuth {
|
||||
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
|
||||
if retry, newScope := needsRetryWithUpdatedScope(res); retry {
|
||||
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
|
||||
res.Body.Close()
|
||||
// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
|
||||
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
|
||||
// for more than one extra scope.
|
||||
res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extraScope = newScope
|
||||
}
|
||||
}
|
||||
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
|
||||
|
||||
if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
|
||||
stream != nil || // We can't retry with a body (which is not restartable in the general case)
|
||||
attempts == backoffNumIterations {
|
||||
return res, err
|
||||
return res, nil
|
||||
}
|
||||
// close response body before retry or context done
|
||||
res.Body.Close()
|
||||
@ -671,10 +681,14 @@ func parseRegistryWarningHeader(header string) string {
|
||||
|
||||
// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
|
||||
// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
|
||||
if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
|
||||
header, ok := strings.CutPrefix(header, expectedPrefix)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
header, ok = strings.CutSuffix(header, expectedSuffix)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
|
||||
|
||||
// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
|
||||
// as if it were replaced by the octet following the backslash.”, so let’s do that…
|
||||
@ -1007,11 +1021,7 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
|
||||
if remoteErrors == nil {
|
||||
return nil, 0, nil // fallback to non-external blob
|
||||
}
|
||||
err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
|
||||
for _, e := range remoteErrors[1:] {
|
||||
err = fmt.Errorf("%s, %w", err, e)
|
||||
}
|
||||
return nil, 0, err
|
||||
return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
|
||||
}
|
||||
|
||||
func getBlobSize(resp *http.Response) int64 {
|
||||
|
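Several hunks above swap strings.HasPrefix/HasSuffix checks plus manual slicing for strings.CutPrefix/strings.CutSuffix (standard library since Go 1.20). A small self-contained sketch of the same pattern; the prefix and suffix values here are made up for illustration, not the exact constants parseRegistryWarningHeader uses:

    package main

    import (
        "fmt"
        "strings"
    )

    // trimWrapped strips a known prefix and suffix in one pass, reporting false if
    // either is missing, mirroring the CutPrefix/CutSuffix pattern in the diff.
    func trimWrapped(header, prefix, suffix string) (string, bool) {
        rest, ok := strings.CutPrefix(header, prefix)
        if !ok {
            return "", false
        }
        rest, ok = strings.CutSuffix(rest, suffix)
        if !ok {
            return "", false
        }
        return rest, true
    }

    func main() {
        fmt.Println(trimWrapped(`299 - "hello"`, `299 - "`, `"`)) // hello true
    }
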
35
vendor/github.com/containers/image/v5/docker/docker_image_dest.go
generated
vendored
@ -344,35 +344,24 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
|
||||
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
|
||||
CanSubstitute: options.CanSubstitute,
|
||||
PossibleManifestFormats: options.PossibleManifestFormats,
|
||||
RequiredCompression: options.RequiredCompression,
|
||||
})
|
||||
for _, candidate := range candidates {
|
||||
var err error
|
||||
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
|
||||
if err != nil {
|
||||
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
|
||||
continue
|
||||
}
|
||||
var candidateRepo reference.Named
|
||||
if !candidate.UnknownLocation {
|
||||
var err error
|
||||
candidateRepo, err = parseBICLocationReference(candidate.Location)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
|
||||
if !candidate.UnknownLocation {
|
||||
logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
|
||||
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
|
||||
} else {
|
||||
logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
|
||||
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !candidate.UnknownLocation {
|
||||
if candidate.CompressorName != blobinfocache.Uncompressed {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
|
||||
if candidate.CompressionAlgorithm != nil {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
|
||||
} else {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
|
||||
}
|
||||
@ -387,8 +376,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if candidate.CompressorName != blobinfocache.Uncompressed {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
|
||||
if candidate.CompressionAlgorithm != nil {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
|
||||
} else {
|
||||
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
|
||||
}
|
||||
@ -439,8 +428,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
return true, private.ReusedBlob{
|
||||
Digest: candidate.Digest,
|
||||
Size: size,
|
||||
CompressionOperation: compressionOperation,
|
||||
CompressionAlgorithm: compressionAlgorithm}, nil
|
||||
CompressionOperation: candidate.CompressionOperation,
|
||||
CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
|
||||
}
|
||||
|
||||
return false, private.ReusedBlob{}, nil
|
||||
|
12
vendor/github.com/containers/image/v5/docker/docker_transport.go
generated
vendored
@ -54,16 +54,12 @@ type dockerReference struct {

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
refString, ok := strings.CutPrefix(refString, "//")
if !ok {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
// Check if ref has UnknownDigestSuffix suffixed to it
unknownDigest := false
if strings.HasSuffix(refString, UnknownDigestSuffix) {
unknownDigest = true
refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix)
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
return nil, err
}

3
vendor/github.com/containers/image/v5/docker/registries_d.go
generated
vendored
@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@ -93,7 +94,7 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.RegistriesDirPath
}
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
if err := fileutils.Exists(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {

24
vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
generated
vendored
@ -1,8 +1,6 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
@ -32,7 +30,7 @@ func (bic *v1OnlyBlobInfoCache) Close() {
|
||||
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
|
||||
}
|
||||
|
||||
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
|
||||
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -48,23 +46,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
|
||||
}
|
||||
return candidates
|
||||
}
|
||||
|
||||
// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
|
||||
// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
|
||||
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
|
||||
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
|
||||
// upon success.
|
||||
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
|
||||
switch compressorName {
|
||||
case Uncompressed:
|
||||
return types.Decompress, nil, nil
|
||||
case UnknownCompression:
|
||||
return types.PreserveOriginal, nil, nil
|
||||
default:
|
||||
algo, err := compression.AlgorithmByName(compressorName)
|
||||
if err == nil {
|
||||
return types.Compress, &algo, nil
|
||||
}
|
||||
return types.PreserveOriginal, nil, err
|
||||
}
|
||||
}
|
||||
|
26
vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
generated
vendored
@ -1,6 +1,7 @@
package blobinfocache

import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@ -35,19 +36,24 @@ type BlobInfoCache2 interface {
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
}

// CandidateLocations2Options are used in CandidateLocations2.
type CandidateLocations2Options struct {
// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
// CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
CanSubstitute bool
PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
}

// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}

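CandidateLocations2 now takes an options struct instead of a bare canSubstitute flag, so new knobs (PossibleManifestFormats, RequiredCompression) can be added without touching every caller. The package is internal and cannot be imported from outside the module, so this is only a shape sketch with hypothetical stand-in types:

    package main

    import "fmt"

    // Hypothetical stand-ins, for illustration only: they mirror the shape of
    // blobinfocache.CandidateLocations2Options without importing the internal package.
    type candidateLocations2Options struct {
        CanSubstitute           bool
        PossibleManifestFormats []string
        RequiredCompression     *string // the real field is *compressiontypes.Algorithm
    }

    func candidateLocations2(digest string, opts candidateLocations2Options) []string {
        // A real cache would consult recorded locations; this only shows how callers
        // now pass one options bundle instead of a bare canSubstitute bool.
        if !opts.CanSubstitute {
            return []string{digest}
        }
        return []string{digest, digest + " (uncompressed variant)"}
    }

    func main() {
        fmt.Println(candidateLocations2("sha256:1234", candidateLocations2Options{
            CanSubstitute:           true,
            PossibleManifestFormats: []string{"application/vnd.oci.image.manifest.v1+json"},
        }))
    }
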
39
vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
generated
vendored
@ -3,40 +3,13 @@ package impl
|
||||
import (
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
compression "github.com/containers/image/v5/pkg/compression/types"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
|
||||
// then function performs a match against the compression requested by the caller and compression of existing blob
|
||||
// (which can be nil to represent uncompressed or unknown)
|
||||
func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
|
||||
if options.RequiredCompression != nil {
|
||||
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
|
||||
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
|
||||
// The caller must re-compress to build those annotations.
|
||||
return false
|
||||
}
|
||||
if candidateCompression == nil ||
|
||||
(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
|
||||
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
|
||||
// so don’t impose any restrictions if candidateCompression == nil
|
||||
if options.PossibleManifestFormats != nil && candidateCompression != nil {
|
||||
if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
|
||||
return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
|
||||
}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions
|
||||
// is acceptable based on opts.
|
||||
func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
|
||||
return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
|
||||
return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: opts.PossibleManifestFormats,
|
||||
RequiredCompression: opts.RequiredCompression,
|
||||
}, opts.OriginalCompression)
|
||||
}
|
||||
|
37
vendor/github.com/containers/image/v5/internal/manifest/manifest.go
generated
vendored
@ -7,6 +7,7 @@ import (
|
||||
"github.com/containers/libtrust"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
|
||||
@ -192,3 +193,39 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions;
|
||||
// it is a struct to allow longer and better-documented field names.
|
||||
type ReuseConditions struct {
|
||||
PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
|
||||
RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
|
||||
}
|
||||
|
||||
// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression
|
||||
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
|
||||
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
|
||||
if c.RequiredCompression != nil {
|
||||
if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
|
||||
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
|
||||
// The caller must re-compress to build those annotations.
|
||||
return false
|
||||
}
|
||||
if candidateCompression == nil ||
|
||||
(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
|
||||
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
|
||||
// so don’t impose any restrictions if candidateCompression == nil
|
||||
if c.PossibleManifestFormats != nil && candidateCompression != nil {
|
||||
if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool {
|
||||
return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
|
||||
}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
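The format check in CandidateCompressionMatchesReuseConditions boils down to slices.ContainsFunc: the candidate compression is usable only if at least one possible manifest format supports it. A standalone sketch of that shape; the MIME strings and the toy supports table are illustrative, not the library's real tables:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    // supports is a toy stand-in for manifest.MIMETypeSupportsCompressionAlgorithm.
    func supports(mimeType, algo string) bool {
        if algo == "zstd" {
            return mimeType == "application/vnd.oci.image.manifest.v1+json"
        }
        return true // gzip/uncompressed accepted everywhere in this toy model
    }

    func main() {
        formats := []string{"application/vnd.docker.distribution.manifest.v2+json"}
        ok := slices.ContainsFunc(formats, func(mt string) bool {
            return supports(mt, "zstd")
        })
        fmt.Println(ok) // false: no listed format supports zstd in this sketch
    }
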
34
vendor/github.com/containers/image/v5/internal/multierr/multierr.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
package multierr

import (
"fmt"
"strings"
)

// Format creates an error value from the input array (which should not be empty)
// If the input contains a single error value, it is returned as is.
// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings.
//
// Typical usage:
//
// var errs []error
// // …
// errs = append(errs, …)
// // …
// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)}
func Format(first, middle, last string, errs []error) error {
switch len(errs) {
case 0:
return fmt.Errorf("internal error: multierr.Format called with 0 errors")
case 1:
return errs[0]
default:
// We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid:
// []error is not a valid parameter to a function expecting []any
anyErrs := make([]any, 0, len(errs))
for _, e := range errs {
anyErrs = append(anyErrs, e)
}
return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...)
}
}

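multierr.Format leans on fmt.Errorf accepting multiple %w verbs (Go 1.20+), which keeps every wrapped error reachable through errors.Is/errors.As. A quick illustration of that mechanism with made-up error values:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        errA := errors.New("first url failed")
        errB := errors.New("second url failed")

        // Roughly what Format("failed fetching external blob from all urls: ", ", ", "", errs) builds.
        err := fmt.Errorf("failed fetching external blob from all urls: %w, %w", errA, errB)

        fmt.Println(err)                  // ...: first url failed, second url failed
        fmt.Println(errors.Is(err, errB)) // true: both wrapped errors remain inspectable
    }
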
6
vendor/github.com/containers/image/v5/internal/signature/sigstore.go
generated
vendored
@ -1,10 +1,10 @@
|
||||
package signature
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -45,7 +45,7 @@ type sigstoreJSONRepresentation struct {
|
||||
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
|
||||
return Sigstore{
|
||||
untrustedMIMEType: untrustedMimeType,
|
||||
untrustedPayload: slices.Clone(untrustedPayload),
|
||||
untrustedPayload: bytes.Clone(untrustedPayload),
|
||||
untrustedAnnotations: maps.Clone(untrustedAnnotations),
|
||||
}
|
||||
}
|
||||
@ -79,7 +79,7 @@ func (s Sigstore) UntrustedMIMEType() string {
|
||||
return s.untrustedMIMEType
|
||||
}
|
||||
func (s Sigstore) UntrustedPayload() []byte {
|
||||
return slices.Clone(s.untrustedPayload)
|
||||
return bytes.Clone(s.untrustedPayload)
|
||||
}
|
||||
|
||||
func (s Sigstore) UntrustedAnnotations() map[string]string {
|
||||
|
8
vendor/github.com/containers/image/v5/internal/signature/simple.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
package signature
|
||||
|
||||
import "golang.org/x/exp/slices"
|
||||
import "bytes"
|
||||
|
||||
// SimpleSigning is a “simple signing” signature.
|
||||
type SimpleSigning struct {
|
||||
@ -10,7 +10,7 @@ type SimpleSigning struct {
|
||||
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
|
||||
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
|
||||
return SimpleSigning{
|
||||
untrustedSignature: slices.Clone(blobChunk),
|
||||
untrustedSignature: bytes.Clone(blobChunk),
|
||||
}
|
||||
}
|
||||
|
||||
@ -21,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
|
||||
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
|
||||
// Almost everyone should use signature.Blob() instead.
|
||||
func (s SimpleSigning) blobChunk() ([]byte, error) {
|
||||
return slices.Clone(s.untrustedSignature), nil
|
||||
return bytes.Clone(s.untrustedSignature), nil
|
||||
}
|
||||
|
||||
func (s SimpleSigning) UntrustedSignature() []byte {
|
||||
return slices.Clone(s.untrustedSignature)
|
||||
return bytes.Clone(s.untrustedSignature)
|
||||
}
|
||||
|
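The signature code now copies byte slices with bytes.Clone (standard library, Go 1.20+) instead of golang.org/x/exp/slices.Clone; behavior is the same for []byte, and a nil input yields a nil result. A short sketch:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        payload := []byte("untrusted signature bytes")
        copyOf := bytes.Clone(payload)

        copyOf[0] = 'X' // mutating the clone leaves the original intact
        fmt.Println(string(payload), string(copyOf))
        fmt.Println(bytes.Clone(nil) == nil) // true: nil in, nil out
    }
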
7
vendor/github.com/containers/image/v5/manifest/docker_schema2.go
generated
vendored
@ -54,9 +54,10 @@ type Schema2HealthConfig struct {
Test []string `json:",omitempty"`

// Zero means to inherit. Durations are expressed as integer nanoseconds.
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.

// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.

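The new StartInterval field lets a healthcheck probe more frequently while the container is still inside its start period, mirroring Docker's HealthConfig.StartInterval (v25.0.0+) from the commit message. A sketch of populating the exported struct; the command and durations are arbitrary examples, and it assumes the public github.com/containers/image/v5/manifest package at the vendored version:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/containers/image/v5/manifest"
    )

    func main() {
        hc := manifest.Schema2HealthConfig{
            Test:          []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
            StartPeriod:   30 * time.Second,
            StartInterval: 2 * time.Second, // new: probe every 2s while still in the start period
            Interval:      30 * time.Second,
            Timeout:       5 * time.Second,
            Retries:       3,
        }
        out, _ := json.Marshal(hc)
        fmt.Println(string(out))
    }
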
7
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
@ -179,14 +179,15 @@ func getEncryptedMediaType(mediatype string) (string, error) {
return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
}

// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// getDecryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
res, ok := strings.CutSuffix(mediatype, "+encrypted")
if !ok {
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
}

return strings.TrimSuffix(mediatype, "+encrypted"), nil
return res, nil
}

// Serialize returns the manifest in a blob format.

6
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@ -16,6 +17,7 @@ import (
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@ -301,7 +303,7 @@ func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error
|
||||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -317,7 +319,7 @@ func ensureParentDirectoryExists(path string) error {
|
||||
// indexExists checks whether the index location specified in the OCI reference exists.
|
||||
// The implementation is opinionated, since in case of unexpected errors false is returned
|
||||
func indexExists(ref ociReference) bool {
|
||||
_, err := os.Stat(ref.indexPath())
|
||||
err := fileutils.Exists(ref.indexPath())
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
|
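ensureDirectoryExists and indexExists above now pair the existence helpers with errors.Is(err, fs.ErrNotExist), the wrapped-error-aware counterpart of os.IsNotExist. A standard-library-only sketch of the same create-if-missing pattern (os.Stat stands in for fileutils.Exists so the example has no extra dependency):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    func ensureDirectoryExists(path string) error {
        if _, err := os.Stat(path); err != nil && errors.Is(err, fs.ErrNotExist) {
            if err := os.MkdirAll(path, 0o755); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        fmt.Println(ensureDirectoryExists(os.TempDir() + "/oci-layout-example"))
    }
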
29
vendor/github.com/containers/image/v5/openshift/openshift-copies.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
||||
"time"
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/containers/image/v5/internal/multierr"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
@ -459,12 +460,6 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
|
||||
return mergedClusterInfo
|
||||
}
|
||||
|
||||
// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
|
||||
// This helper implements the error and Errors interfaces. Keeping it private
|
||||
// prevents people from making an aggregate of 0 errors, which is not
|
||||
// an error, but does satisfy the error interface.
|
||||
type aggregateErr []error
|
||||
|
||||
// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
|
||||
// NewAggregate converts a slice of errors into an Aggregate interface, which
|
||||
// is itself an implementation of the error interface. If the slice is empty,
|
||||
@ -485,29 +480,9 @@ func newAggregate(errlist []error) error {
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return aggregateErr(errs)
|
||||
return multierr.Format("[", ", ", "]", errs)
|
||||
}
|
||||
|
||||
// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
|
||||
// Error is part of the error interface.
|
||||
func (agg aggregateErr) Error() string {
|
||||
if len(agg) == 0 {
|
||||
// This should never happen, really.
|
||||
return ""
|
||||
}
|
||||
if len(agg) == 1 {
|
||||
return agg[0].Error()
|
||||
}
|
||||
result := fmt.Sprintf("[%s", agg[0].Error())
|
||||
for i := 1; i < len(agg); i++ {
|
||||
result += fmt.Sprintf(", %s", agg[i].Error())
|
||||
}
|
||||
result += "]"
|
||||
return result
|
||||
}
|
||||
|
||||
// REMOVED: aggregateErr.Errors
|
||||
|
||||
// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
|
||||
// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
|
||||
type errConfigurationInvalid []error
|
||||
|
4
vendor/github.com/containers/image/v5/ostree/ostree_dest.go
generated
vendored
@ -11,6 +11,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@ -29,6 +30,7 @@ import (
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/opencontainers/go-digest"
|
||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||
@ -504,7 +506,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
|
||||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
2
vendor/github.com/containers/image/v5/ostree/ostree_src.go
generated
vendored
@ -190,7 +190,7 @@ func (o ostreeReader) Read(p []byte) (int, error) {
if count == 0 {
return 0, io.EOF
}
data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count)
copy(p, data)
return count, nil
}

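The ostreeReader.Read hunk replaces the old (*[1 << 30]byte) cast idiom with unsafe.Slice (Go 1.17+), which builds a slice of exactly count elements from a raw pointer. A cgo-free sketch of the conversion, with a Go buffer standing in for the pointer returned by C.g_bytes_get_data:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        buf := []byte("payload from C land")
        raw := unsafe.Pointer(&buf[0]) // stand-in for the unsafe pointer cgo would hand back
        count := 7

        data := unsafe.Slice((*byte)(raw), count) // view of exactly count bytes, no copy
        out := make([]byte, count)
        copy(out, data)
        fmt.Println(string(out)) // payload
    }
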
107
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
generated
vendored
@ -1,13 +1,17 @@
|
||||
// Package prioritize provides utilities for prioritizing locations in
|
||||
// Package prioritize provides utilities for filtering and prioritizing locations in
|
||||
// types.BlobInfoCache.CandidateLocations.
|
||||
package prioritize
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
|
||||
@ -20,28 +24,67 @@ const replacementAttempts = 5
|
||||
// This is a heuristic/guess, and could well use a different value.
|
||||
const replacementUnknownLocationAttempts = 2
|
||||
|
||||
// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
|
||||
// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
|
||||
//
|
||||
// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
|
||||
// if not nil, the call is assumed to be CandidateLocations2.
|
||||
//
|
||||
// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
|
||||
func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
|
||||
if v2Options == nil {
|
||||
return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
|
||||
}
|
||||
|
||||
var op types.LayerCompression
|
||||
var algo *compression.Algorithm
|
||||
switch compressorName {
|
||||
case blobinfocache.Uncompressed:
|
||||
op = types.Decompress
|
||||
algo = nil
|
||||
case blobinfocache.UnknownCompression:
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
|
||||
return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
|
||||
default:
|
||||
op = types.Compress
|
||||
algo_, err := compression.AlgorithmByName(compressorName)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
|
||||
digest.String(), compressorName, err)
|
||||
return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
|
||||
}
|
||||
algo = &algo_
|
||||
}
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, algo) {
|
||||
requiredCompresssion := "nil"
|
||||
if v2Options.RequiredCompression != nil {
|
||||
requiredCompresssion = v2Options.RequiredCompression.Name()
|
||||
}
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
|
||||
digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
|
||||
return false, types.PreserveOriginal, nil
|
||||
}
|
||||
|
||||
return true, op, algo
|
||||
}
|
||||
|
||||
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
|
||||
type CandidateWithTime struct {
|
||||
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
|
||||
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
|
||||
}
|
||||
|
||||
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
|
||||
// along with the specially-treated digest values for the implementation of sort.Interface.Less
|
||||
// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
|
||||
// along with the specially-treated digest values relevant to the ordering.
|
||||
type candidateSortState struct {
|
||||
cs []CandidateWithTime // The entries to sort
|
||||
primaryDigest digest.Digest // The digest the user actually asked for
|
||||
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
|
||||
primaryDigest digest.Digest // The digest the user actually asked for
|
||||
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Len() int {
|
||||
return len(css.cs)
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Less(i, j int) bool {
|
||||
xi := css.cs[i]
|
||||
xj := css.cs[j]
|
||||
|
||||
func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
|
||||
// primaryDigest entries come first, more recent first.
|
||||
// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
|
||||
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
|
||||
@ -50,36 +93,40 @@ func (css *candidateSortState) Less(i, j int) bool {
|
||||
if xi.Candidate.Digest != xj.Candidate.Digest {
|
||||
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
|
||||
if xi.Candidate.Digest == css.primaryDigest {
|
||||
return true
|
||||
return -1
|
||||
}
|
||||
if xj.Candidate.Digest == css.primaryDigest {
|
||||
return false
|
||||
return 1
|
||||
}
|
||||
if css.uncompressedDigest != "" {
|
||||
if xi.Candidate.Digest == css.uncompressedDigest {
|
||||
return false
|
||||
return 1
|
||||
}
|
||||
if xj.Candidate.Digest == css.uncompressedDigest {
|
||||
return true
|
||||
return -1
|
||||
}
|
||||
}
|
||||
} else { // xi.Candidate.Digest == xj.Candidate.Digest
|
||||
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
|
||||
if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
|
||||
return xi.LastSeen.After(xj.LastSeen)
|
||||
return -xi.LastSeen.Compare(xj.LastSeen)
|
||||
}
|
||||
}
|
||||
|
||||
// Neither of the digests are primaryDigest/uncompressedDigest:
|
||||
if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
|
||||
return xi.LastSeen.After(xj.LastSeen)
|
||||
if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
|
||||
return -cmp
|
||||
}
|
||||
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
|
||||
return xi.Candidate.Digest < xj.Candidate.Digest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Swap(i, j int) {
|
||||
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
|
||||
// FIXME: Use cmp.Compare after we update to Go 1.21.
|
||||
switch {
|
||||
case xi.Candidate.Digest < xj.Candidate.Digest:
|
||||
return -1
|
||||
case xi.Candidate.Digest > xj.Candidate.Digest:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
@ -100,12 +147,10 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
|
||||
var unknownLocationCandidates []CandidateWithTime
|
||||
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
|
||||
// compare equal.
|
||||
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
|
||||
sort.Sort(&candidateSortState{
|
||||
cs: cs,
|
||||
slices.SortFunc(cs, (&candidateSortState{
|
||||
primaryDigest: primaryDigest,
|
||||
uncompressedDigest: uncompressedDigest,
|
||||
})
|
||||
}).compare)
|
||||
for _, candidate := range cs {
|
||||
if candidate.Candidate.UnknownLocation {
|
||||
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
|
||||
|
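prioritize.go drops the sort.Interface boilerplate (Len/Less/Swap) in favor of slices.SortFunc over a three-way compare function, using time.Time.Compare for the recency ordering. A standalone sketch of the same shape using the standard library slices and cmp packages (Go 1.21+), where the vendored code still uses golang.org/x/exp/slices:

    package main

    import (
        "cmp"
        "fmt"
        "slices"
        "time"
    )

    type candidate struct {
        digest   string
        lastSeen time.Time
    }

    func main() {
        now := time.Now()
        cs := []candidate{
            {"sha256:bbb", now.Add(-2 * time.Hour)},
            {"sha256:aaa", now.Add(-1 * time.Hour)},
            {"sha256:ccc", now.Add(-1 * time.Hour)},
        }

        // More recent first; fall back to digest for a deterministic order,
        // mirroring the compare method the vendored code now feeds to SortFunc.
        slices.SortFunc(cs, func(a, b candidate) int {
            if c := b.lastSeen.Compare(a.lastSeen); c != 0 {
                return c
            }
            return cmp.Compare(a.digest, b.digest)
        })
        fmt.Println(cs[0].digest, cs[1].digest, cs[2].digest) // sha256:aaa sha256:ccc sha256:bbb
    }
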
55
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
@ -135,14 +135,17 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
|
||||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
|
||||
// with corresponding compression info from mem.compressors, and returns the result of appending
|
||||
// them to candidates. v2Output allows including candidates with unknown location, and filters out
|
||||
// candidates with unknown compression.
|
||||
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
|
||||
// them to candidates.
|
||||
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
|
||||
// with unknown compression.
|
||||
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
if v, ok := mem.compressors[digest]; ok {
|
||||
compressorName = v
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression && v2Output {
|
||||
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
|
||||
if !ok {
|
||||
return candidates
|
||||
}
|
||||
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
|
||||
@ -150,20 +153,22 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
|
||||
for l, t := range locations {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: l,
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
Location: l,
|
||||
},
|
||||
LastSeen: t,
|
||||
})
|
||||
}
|
||||
} else if v2Output {
|
||||
} else if v2Options != nil {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
@ -178,24 +183,24 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
|
||||
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
|
||||
// that could possibly be reused within the specified (transport scope) (if they still
|
||||
// exist, which is not guaranteed).
|
||||
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
|
||||
}
|
||||
|
||||
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
// candidateLocations implements CandidateLocations / CandidateLocations2.
|
||||
// v2Options is not nil if the caller is CandidateLocations2.
|
||||
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
mem.mutex.Lock()
|
||||
defer mem.mutex.Unlock()
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
|
||||
var uncompressedDigest digest.Digest // = ""
|
||||
if canSubstitute {
|
||||
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
|
||||
@ -203,12 +208,12 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
|
||||
if otherDigests != nil {
|
||||
for _, d := range otherDigests.Values() {
|
||||
if d != primaryDigest && d != uncompressedDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
|
||||
}
|
||||
}
|
||||
}
|
||||
if uncompressedDigest != primaryDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
132
vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
generated
vendored
@ -428,88 +428,86 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
|
||||
}
|
||||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
|
||||
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
|
||||
// location, and filters out candidates with unknown compression.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
|
||||
var rows *sql.Rows
|
||||
var err error
|
||||
if v2Output {
|
||||
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
|
||||
"ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
} else {
|
||||
rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
|
||||
"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
blobinfocache.UnknownCompression,
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("looking up candidate locations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
for rows.Next() {
|
||||
var location string
|
||||
var time time.Time
|
||||
var compressorName string
|
||||
if err := rows.Scan(&location, &time, &compressorName); err != nil {
|
||||
return nil, fmt.Errorf("scanning candidate: %w", err)
|
||||
}
|
||||
res = append(res, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: types.BICLocationReference{Opaque: location},
|
||||
},
|
||||
LastSeen: time,
|
||||
})
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through locations: %w", err)
|
||||
}
|
||||
|
||||
if len(res) == 0 && v2Output {
|
||||
// and returns the result of appending them to candidates.
|
||||
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
|
||||
// with unknown compression.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
if v2Options != nil {
|
||||
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning compressorName: %w", err)
|
||||
}
|
||||
if found {
|
||||
res = append(res, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressor,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
compressorName = compressor
|
||||
}
|
||||
}
|
||||
candidates = append(candidates, res...)
|
||||
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
|
||||
if !ok {
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("looking up candidate locations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
rowAdded := false
|
||||
for rows.Next() {
|
||||
var location string
|
||||
var time time.Time
|
||||
if err := rows.Scan(&location, &time); err != nil {
|
||||
return nil, fmt.Errorf("scanning candidate: %w", err)
|
||||
}
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
Location: types.BICLocationReference{Opaque: location},
|
||||
},
|
||||
LastSeen: time,
|
||||
})
|
||||
rowAdded = true
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through locations: %w", err)
|
||||
}
|
||||
|
||||
if !rowAdded && v2Options != nil {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
|
||||
// that could possibly be reused within the specified (transport scope) (if they still
|
||||
// exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
|
||||
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
}

func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
// candidateLocations implements CandidateLocations / CandidateLocations2.
// v2Options is not nil if the caller is CandidateLocations2.
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
if err != nil {
return nil, err
}
@ -538,7 +536,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
if err != nil {
return nil, err
}
@ -549,7 +547,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
}

if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
if err != nil {
return nil, err
}
@ -571,5 +569,5 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
}
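The hunks above replace the bare `canSubstitute` bool with an options struct (`blobinfocache.CandidateLocations2Options`, whose `CanSubstitute` field appears in the diff). A minimal, self-contained sketch of that refactoring pattern; all names below are stand-ins, not the containers/image API:

```go
package main

import "fmt"

// lookupOptions is a stand-in for the options struct introduced above; only
// the CanSubstitute knob is mirrored here.
type lookupOptions struct {
	CanSubstitute bool
}

// candidates used to take a bare canSubstitute bool; passing a struct lets new
// options be added later without changing the signature for every caller again.
func candidates(digest string, opts lookupOptions) []string {
	out := []string{digest}
	if opts.CanSubstitute {
		out = append(out, digest+"-uncompressed-variant") // pretend substitution
	}
	return out
}

func main() {
	fmt.Println(candidates("sha256:abc", lookupOptions{CanSubstitute: true}))
}
```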
55 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored
@ -13,14 +13,15 @@ import (
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/ioutils"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
)

@ -231,7 +232,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
return types.DockerAuthConfig{}, err
}

var multiErr error
var multiErr []error
for _, helper := range helpers {
var (
creds types.DockerAuthConfig
@ -253,7 +254,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
if err != nil {
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
if creds != (types.DockerAuthConfig{}) {
@ -266,7 +267,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
}
if multiErr != nil {
return types.DockerAuthConfig{}, multiErr
return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}

logrus.Debugf("No credentials for %s found", key)
@ -313,7 +314,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}

// Make sure to collect all errors.
var multiErr error
var multiErr []error
for _, helper := range helpers {
var desc string
var err error
@ -345,14 +346,14 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
}
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
return desc, nil
}
return "", multiErr
return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr)
}

func unsupportedNamespaceErr(helper string) error {
@ -376,53 +377,56 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
return err
}

var multiErr error
isLoggedIn := false

removeFromCredHelper := func(helper string) {
removeFromCredHelper := func(helper string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
return nil
}
err := deleteCredsFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
return nil
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
return nil
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)
}

var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
var helperErr error
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
helperErr = removeFromCredHelper(innerHelper)
}
if _, ok := fileContents.AuthConfigs[key]; ok {
isLoggedIn = true
delete(fileContents.AuthConfigs, key)
}
return true, "", multiErr
return true, "", helperErr
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
}
// External helpers.
default:
removeFromCredHelper(helper)
if err := removeFromCredHelper(helper); err != nil {
multiErr = append(multiErr, err)
}
}
}

if multiErr != nil {
return multiErr
return multierr.Format("errors removing credentials\n\t* ", "\n\t*", "\n", multiErr)
}
if !isLoggedIn {
return ErrNotLoggedIn
@ -439,7 +443,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
return err
}

var multiErr error
var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
@ -479,13 +483,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
}
if err != nil {
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
logrus.Debugf("All credentials removed from credential helper %s", helper)
}

return multiErr
if multiErr != nil {
return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}
return nil
}

// prepareForEdit processes sys and key (if keyRelevant) to return:
@ -570,9 +577,9 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
_, err := os.Stat(runtimeDir)
if os.IsNotExist(err) {
// We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case.
err := fileutils.Exists(runtimeDir)
if errors.Is(err, fs.ErrNotExist) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
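Throughout config.go the diff drops hashicorp/go-multierror in favor of collecting into a plain `[]error` and formatting it once at the end with the internal `multierr.Format(prefix, middle, suffix, errs)` helper. A minimal, stdlib-only stand-in showing that shape; `formatMulti` is an assumption for illustration, not the vendored helper:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// formatMulti mimics the shape of the internal multierr.Format helper:
// prefix + error messages joined by middle + suffix.
func formatMulti(first, middle, last string, errs []error) error {
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return errors.New(first + strings.Join(msgs, middle) + last)
}

func main() {
	var multiErr []error // collect; format only once at the end
	for _, helper := range []string{"helper-a", "helper-b"} {
		multiErr = append(multiErr, fmt.Errorf("looking up credentials in %s: not found", helper))
	}
	if multiErr != nil {
		fmt.Print(formatMulti("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr))
	}
}
```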
22 vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go generated vendored
@ -7,6 +7,7 @@ import (
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/manifoldco/promptui"
@ -169,26 +170,17 @@ func (r *Resolved) Description() string {
// Note that nil is returned if len(pullErrors) == 0. Otherwise, the amount of
// pull errors must equal the amount of pull candidates.
func (r *Resolved) FormatPullErrors(pullErrors []error) error {
if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
if len(pullErrors) == 0 {
return nil
}

if len(pullErrors) != len(r.PullCandidates) {
pullErrors = append(slices.Clone(pullErrors),
fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
}

switch len(pullErrors) {
case 0:
return nil
case 1:
return pullErrors[0]
default:
var sb strings.Builder
sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
for _, e := range pullErrors {
sb.WriteString("\n * ")
sb.WriteString(e.Error())
}
return errors.New(sb.String())
}
return multierr.Format(fmt.Sprintf("%d errors occurred while pulling:\n * ", len(pullErrors)), "\n * ", "", pullErrors)
}

// PullCandidate is a resolved name. Once the Value has been used
7 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go generated vendored
@ -9,6 +9,7 @@ import (

"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
@ -297,11 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
}
}
if len(errs) > 0 {
err := errs[0]
for i := 1; i < len(errs); i++ {
err = fmt.Errorf("%v\n: %w", errs[i], err)
}
return nil, err
return nil, multierr.Format("", "\n", "", errs)
}
return &res, nil
}
3 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@ -13,6 +13,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
@ -564,7 +565,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
} else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath
8 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go generated vendored
@ -55,9 +55,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.RootCAs.AppendCertsFromPEM(data)
}
if strings.HasSuffix(f.Name(), ".cert") {
if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok {
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
keyName := base + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
@ -68,9 +68,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
if strings.HasSuffix(f.Name(), ".key") {
if base, ok := strings.CutSuffix(f.Name(), ".key"); ok {
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
certName := base + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
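The SetupCertificates hunks swap `strings.HasSuffix` plus manual slicing for `strings.CutSuffix` (Go 1.20+), which returns the base name directly. A small self-contained sketch of that pairing logic, with made-up file names:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"client.cert", "client.key", "ca.crt"} {
		// CutSuffix reports whether the suffix matched and hands back the base name.
		if base, ok := strings.CutSuffix(name, ".cert"); ok {
			fmt.Println("certificate", name, "expects key", base+".key")
		}
		if base, ok := strings.CutSuffix(name, ".key"); ok {
			fmt.Println("key", name, "expects certificate", base+".cert")
		}
	}
}
```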
3 vendor/github.com/containers/image/v5/signature/policy_config.go generated vendored
@ -24,6 +24,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
)
@ -65,7 +66,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.SignaturePolicyPath
}
userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
if _, err := os.Stat(userPolicyFilePath); err == nil {
if err := fileutils.Exists(userPolicyFilePath); err == nil {
return userPolicyFilePath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
2 vendor/github.com/containers/image/v5/signature/policy_eval.go generated vendored
@ -94,7 +94,7 @@ const (
pcDestroyed policyContextState = "Destroyed"
)

// changeContextState changes pc.state, or fails if the state is unexpected
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
9 vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go generated vendored
@ -7,8 +7,8 @@ import (
"errors"
"fmt"
"os"
"strings"

"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
@ -134,12 +134,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
}
return false, summary
}
9 vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go generated vendored
@ -10,8 +10,8 @@ import (
"errors"
"fmt"
"os"
"strings"

"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
@ -270,12 +270,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
}
return false, summary
}
12 vendor/github.com/containers/image/v5/signature/sigstore/rekor/rekor.go generated vendored
@ -17,7 +17,6 @@ import (
"github.com/containers/image/v5/signature/internal"
signerInternal "github.com/containers/image/v5/signature/sigstore/internal"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
rekor "github.com/sigstore/rekor/pkg/client"
"github.com/sigstore/rekor/pkg/generated/client"
"github.com/sigstore/rekor/pkg/generated/client/entries"
@ -114,17 +113,22 @@ func (u *uploader) uploadEntry(ctx context.Context, proposedEntry models.Propose
return resp.GetPayload(), nil
}

// stringPtr returns a pointer to the provided string value.
func stringPtr(s string) *string {
return &s
}

// uploadKeyOrCert integrates this code into sigstore/internal.Signer.
// Given components of the created signature, it returns a SET that should be added to the signature.
func (u *uploader) uploadKeyOrCert(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) {
payloadHash := sha256.Sum256(payloadBytes) // HashedRecord only accepts SHA-256
proposedEntry := models.Hashedrekord{
APIVersion: swag.String(internal.HashedRekordV001APIVersion),
APIVersion: stringPtr(internal.HashedRekordV001APIVersion),
Spec: models.HashedrekordV001Schema{
Data: &models.HashedrekordV001SchemaData{
Hash: &models.HashedrekordV001SchemaDataHash{
Algorithm: swag.String(models.HashedrekordV001SchemaDataHashAlgorithmSha256),
Value: swag.String(hex.EncodeToString(payloadHash[:])),
Algorithm: stringPtr(models.HashedrekordV001SchemaDataHashAlgorithmSha256),
Value: stringPtr(hex.EncodeToString(payloadHash[:])),
},
},
Signature: &models.HashedrekordV001SchemaSignature{
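Here the rekor code drops the go-openapi/swag dependency and takes string addresses with a tiny local helper instead. A stand-alone illustration of that helper in isolation:

```go
package main

import "fmt"

// stringPtr returns a pointer to the provided string value, replacing
// swag.String at the call sites above.
func stringPtr(s string) *string {
	return &s
}

func main() {
	apiVersion := stringPtr("0.0.1")
	fmt.Println(*apiVersion)
}
```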
2 vendor/github.com/containers/image/v5/storage/storage_dest.go generated vendored
@ -1189,7 +1189,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
if err != nil {
return err
}
s.manifest = slices.Clone(manifestBlob)
s.manifest = bytes.Clone(manifestBlob)
s.manifestDigest = digest
return nil
}
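PutManifest now copies the manifest with `bytes.Clone` rather than `slices.Clone`; either way the destination keeps an independent copy, so later writes to the caller's buffer cannot change the stored manifest. A short demonstration of that property:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	original := []byte("manifest-v1")
	stored := bytes.Clone(original)  // independent copy, as in PutManifest
	original[len(original)-1] = '2'  // caller keeps mutating its own buffer
	fmt.Printf("caller=%s stored=%s\n", original, stored)
}
```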
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 30
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)

// Version is the specification version that the package types support.
14 vendor/github.com/containers/storage/.cirrus.yml generated vendored
@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20240102t155643z-f39f38d13"
IMAGE_SUFFIX: "c20240320t153921z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@ -116,6 +116,7 @@ debian_testing_task: &debian_testing

lint_task:
alias: lint
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
@ -134,6 +135,7 @@ lint_task:

# Update metadata on VM images referenced by this repository state
meta_task:
alias: meta

container:
image: "quay.io/libpod/imgts:latest"
@ -156,6 +158,7 @@ meta_task:

vendor_task:
alias: vendor
container:
image: golang
modules_cache:
@ -166,13 +169,20 @@ vendor_task:

cross_task:
alias: cross
container:
image: golang:1.20
build_script: make cross

# Represent overall pass/fail status from required dependent tasks
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
alias: success
# N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT.
# Ref: https://github.com/openshift/release/pull/49820
name: "Total Success"
depends_on:
- lint
- fedora_testing
2 vendor/github.com/containers/storage/VERSION generated vendored
@ -1 +1 @@
1.53.0
1.53.1-dev
5 vendor/github.com/containers/storage/drivers/aufs/aufs.go generated vendored
@ -41,6 +41,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
mountpk "github.com/containers/storage/pkg/mount"
@ -243,7 +244,7 @@ func (a *Driver) Metadata(id string) (map[string]string, error) {
// Exists returns true if the given id is registered with
// this driver
func (a *Driver) Exists(id string) bool {
if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
if err := fileutils.Lexists(path.Join(a.rootPath(), "layers", id)); err != nil {
return false
}
return true
@ -431,7 +432,7 @@ func atomicRemove(source string) error {
case err == nil, os.IsNotExist(err):
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
if e := fileutils.Exists(source); !os.IsNotExist(e) {
return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
}
default:
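From here on, the containers/storage hunks repeatedly replace `_, err := os.Stat(p)` (or `os.Lstat`) with `err := fileutils.Exists(p)` / `fileutils.Lexists(p)` from pkg/fileutils, keeping only the error. A stdlib-only stand-in that mirrors the call-site shape; `exists` below is an assumption for illustration, not the vendored helper's implementation:

```go
package main

import (
	"fmt"
	"os"
)

// exists mirrors the fileutils.Exists call-site shape: it returns only an
// error, with nil meaning the path could be shown to exist.
func exists(path string) error {
	_, err := os.Stat(path)
	return err
}

func main() {
	for _, p := range []string{os.TempDir(), "/definitely/not/there"} {
		if err := exists(p); err != nil {
			fmt.Println(p, "missing:", err)
			continue
		}
		fmt.Println(p, "present")
	}
}
```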
7 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go generated vendored
@ -32,6 +32,7 @@ import (

graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
@ -589,11 +590,11 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error {
// Remove the filesystem with given id.
func (d *Driver) Remove(id string) error {
dir := d.subvolumesDirID(id)
if _, err := os.Stat(dir); err != nil {
if err := fileutils.Exists(dir); err != nil {
return err
}
quotasDir := d.quotasDirID(id)
if _, err := os.Stat(quotasDir); err == nil {
if err := fileutils.Exists(quotasDir); err == nil {
if err := os.Remove(quotasDir); err != nil {
return err
}
@ -669,7 +670,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
// Exists checks if the id exists in the filesystem.
func (d *Driver) Exists(id string) bool {
dir := d.subvolumesDirID(id)
_, err := os.Stat(dir)
err := fileutils.Exists(dir)
return err == nil
}
5 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go generated vendored
@ -22,6 +22,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/dmesg"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/loopback"
"github.com/containers/storage/pkg/mount"
@ -257,7 +258,7 @@ func (devices *DeviceSet) hasImage(name string) bool {
dirname := devices.loopbackDir()
filename := path.Join(dirname, name)

_, err := os.Stat(filename)
err := fileutils.Exists(filename)
return err == nil
}

@ -1192,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
defer devices.deactivateDevice(info)

fsMountPoint := "/run/containers/storage/mnt"
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
if err := fileutils.Exists(fsMountPoint); os.IsNotExist(err) {
if err := os.MkdirAll(fsMountPoint, 0o700); err != nil {
return err
}
3 vendor/github.com/containers/storage/drivers/devmapper/driver.go generated vendored
@ -12,6 +12,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
@ -222,7 +223,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}

idFile := path.Join(mp, "id")
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
if err := fileutils.Exists(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
3 vendor/github.com/containers/storage/drivers/driver.go generated vendored
@ -10,6 +10,7 @@ import (

"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@ -471,7 +472,7 @@ func ScanPriorDrivers(root string) map[string]bool {

for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil {
if err := fileutils.Exists(p); err == nil {
driversMap[driver] = true
}
}
48 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@ -24,6 +24,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/fsutils"
"github.com/containers/storage/pkg/idmap"
"github.com/containers/storage/pkg/idtools"
@ -574,7 +575,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
if val != "" {
_, err := os.Stat(val)
err := fileutils.Exists(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err)
}
@ -676,7 +677,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}

for _, dir := range []string{home, runhome} {
if _, err := os.Stat(dir); err != nil {
if err := fileutils.Exists(dir); err != nil {
_ = idtools.MkdirAllAs(dir, 0o700, 0, 0)
}
}
@ -854,7 +855,7 @@ func (d *Driver) Status() [][2]string {
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
if err := fileutils.Exists(dir); err != nil {
return nil, err
}

@ -1016,7 +1017,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
rootGID = int(st.GID())
}

if _, err := system.Lstat(dir); err == nil {
if err := fileutils.Lexists(dir); err == nil {
logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
// Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
// so that we can’t end up with two symlinks in linkDir pointing to the same layer.
@ -1144,7 +1145,7 @@ func (d *Driver) getLower(parent string) (string, error) {
parentDir := d.dir(parent)

// Ensure parent exists
if _, err := os.Lstat(parentDir); err != nil {
if err := fileutils.Lexists(parentDir); err != nil {
return "", err
}

@ -1197,10 +1198,10 @@ func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {

newpath := path.Join(homedir, id)

if _, err := os.Stat(newpath); err != nil {
if err := fileutils.Exists(newpath); err != nil {
for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
err = fileutils.Exists(l)
if err == nil {
return l, homedir, true
}
@ -1340,7 +1341,7 @@ func (d *Driver) recreateSymlinks() error {
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
// Check if the symlink exists, and if it doesn't, create it again with the
// name we got from the "link" file
_, err = os.Lstat(linkPath)
err = fileutils.Lexists(linkPath)
if err != nil && os.IsNotExist(err) {
if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil {
errs = multierror.Append(errs, err)
@ -1417,7 +1418,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {

func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
if err := fileutils.Exists(dir); err != nil {
return "", err
}

@ -1528,8 +1529,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
composeFsLayersDir := filepath.Join(dir, "composefs-layers")
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
if err != nil {
if err := fileutils.Exists(composefsBlob); err != nil {
if os.IsNotExist(err) {
return "", nil
}
@ -1633,11 +1633,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO

absLowers = append(absLowers, lower)
diffN = 1
_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
for err == nil {
absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
diffN++
_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
}
}

@ -1660,15 +1660,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", err
}
// if it is in an additional store, do not fail if the directory already exists
if _, err2 := os.Stat(diffDir); err2 != nil {
if err2 := fileutils.Exists(diffDir); err2 != nil {
return "", err
}
}

mergedDir := path.Join(dir, "merged")
// Create the driver merged dir
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
// Attempt to create the merged dir only if it doesn't exist.
if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) {
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
}
}
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
@ -1834,14 +1836,14 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error {
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
if err := fileutils.Exists(dir); err != nil {
return err
}
mountpoint := path.Join(dir, "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}

@ -1849,7 +1851,7 @@ func (d *Driver) Put(id string) error {

mappedRoot := filepath.Join(d.home, id, "mapped")
// It should not happen, but cleanup any mapped mount if it was leaked.
if _, err := os.Stat(mappedRoot); err == nil {
if err := fileutils.Exists(mappedRoot); err == nil {
mounts, err := os.ReadDir(mappedRoot)
if err == nil {
// Go through all of the mapped mounts.
@ -1920,7 +1922,7 @@ func (d *Driver) Put(id string) error {

// Exists checks to see if the id is already mounted.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
err := fileutils.Exists(d.dir(id))
return err == nil
}

@ -2332,7 +2334,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
}
for err == nil {
i++
_, err = os.Stat(nameWithSuffix(diffDir, i))
err = fileutils.Exists(nameWithSuffix(diffDir, i))
}

for i > 0 {
@ -2417,7 +2419,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
filepath.Join(target, "info"),
filepath.Join(target, "blob"),
} {
if _, err := os.Stat(p); err != nil {
if err := fileutils.Exists(p); err != nil {
wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
}
5 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored
@ -12,6 +12,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@ -210,7 +211,7 @@ func (d *Driver) dir2(id string, useImageStore bool) string {
} else {
homedir = filepath.Join(d.home, "dir", filepath.Base(id))
}
if _, err := os.Stat(homedir); err != nil {
if err := fileutils.Exists(homedir); err != nil {
additionalHomes := d.additionalHomes[:]
if d.imageStore != "" {
additionalHomes = append(additionalHomes, d.imageStore)
@ -269,7 +270,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {

// Exists checks to see if the directory exists for the given id.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
err := fileutils.Exists(d.dir(id))
return err == nil
}
Some files were not shown because too many files have changed in this diff.