vendor in latest containers/(storage,common,image)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh
2022-04-21 15:15:41 -04:00
parent 121dde6234
commit 17105028e5
54 changed files with 439 additions and 2083 deletions

go.mod

@@ -12,12 +12,12 @@ require (
 	github.com/containernetworking/cni v1.0.1
 	github.com/containernetworking/plugins v1.1.1
 	github.com/containers/buildah v1.25.2-0.20220406205807-5b8e79118057
-	github.com/containers/common v0.47.5-0.20220421072908-49f1a40067b2
+	github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0
+	github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c
 	github.com/containers/ocicrypt v1.1.3
 	github.com/containers/psgo v1.7.2
-	github.com/containers/storage v1.39.1-0.20220414183333-eea4e0f5f1f9
+	github.com/containers/storage v1.39.1-0.20220421071128-4899f8265d63
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3

go.sum

@@ -126,6 +126,7 @@ github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmU
 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -356,14 +357,14 @@ github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19
 github.com/containers/buildah v1.25.2-0.20220406205807-5b8e79118057 h1:lKSxhMBpcHyyQrj2QJYzcm56uiSeibRdSL2KoppF6rg=
 github.com/containers/buildah v1.25.2-0.20220406205807-5b8e79118057/go.mod h1:iSoopbYRb6K4b5c3hXgXNkGTI/T085t2+XiGjceud94=
 github.com/containers/common v0.47.5-0.20220331143923-5f14ec785c18/go.mod h1:Vr2Fn6EdzD6JNAbz8L8bTv3uWLv2p31Ih2O3EAK6Hyc=
-github.com/containers/common v0.47.5-0.20220421072908-49f1a40067b2 h1:NadhQUF7FRaZkDeW7xDcU3nxk7kV6b2yRmwGWDp+BNY=
-github.com/containers/common v0.47.5-0.20220421072908-49f1a40067b2/go.mod h1:BBq6jdyjXvJh69YzQPvIuZjBho0MRdA0XGaqBnsO+1Y=
+github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb h1:TBrx1KcmWcesByqTb4Cq7F6bg7bDOjqCf6+6rbi8x4k=
+github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb/go.mod h1:r80nWTmJrG9EoLkuI6WfbWQDUNQVqkVuB8Oaj1VVjOA=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v5 v5.19.2-0.20220224100137-1045fb70b094/go.mod h1:XoYK6kE0dpazFNcuS+a8lra+QfbC6s8tzv+cUuCrZpE=
 github.com/containers/image/v5 v5.20.1-0.20220404163228-d03e80fc66b3/go.mod h1:2nEPM0WuinC/0ssPsMv5Iy8YaRueUUTmTp3C7bn5uro=
-github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0 h1:Md1CckW9KSYkdtMdKG70Fc+YqCCVgT+HAr7NS9Ilf8E=
-github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0/go.mod h1:JhGkIpC7vKBpLc6mTBE4S8cZUAD+8HgicsxYaLv6BsQ=
+github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c h1:hshgYt6RAs4L0KhOEc2/qLF++2MryOfAXvTWmxYu4v4=
+github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c/go.mod h1:qpUuaiE2mON6xMA0PRO9GteyH9+KT+C6WygZzL5RhnE=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -380,8 +381,8 @@ github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZP
 github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
 github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
 github.com/containers/storage v1.39.1-0.20220330193934-f3200eb5a5d9/go.mod h1:IMa2AfBI+Fxxk2hQqLTGhpJX6z2pZS1/I785QJeUwUY=
-github.com/containers/storage v1.39.1-0.20220414183333-eea4e0f5f1f9 h1:cB2AvqxpfyqyyffXtDN0txJhD0lIaZWktbSRI92WpN4=
-github.com/containers/storage v1.39.1-0.20220414183333-eea4e0f5f1f9/go.mod h1:hFiHLMgNU0r3MiUpE97hEBaEKCN8fEIuEEBXoFC9eN0=
+github.com/containers/storage v1.39.1-0.20220421071128-4899f8265d63 h1:57UXh6fThYqCUJ6iFwHnlFNoWSWlXylkW4H1VRs05mM=
+github.com/containers/storage v1.39.1-0.20220421071128-4899f8265d63/go.mod h1:hFiHLMgNU0r3MiUpE97hEBaEKCN8fEIuEEBXoFC9eN0=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -994,7 +995,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
 github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -1314,8 +1314,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
 github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4=
-github.com/sylabs/sif/v2 v2.4.2 h1:L4jcqeOF33JfSnH+8GJKC7/ooVpzpZ2K7wotGG4ZzqQ=
 github.com/sylabs/sif/v2 v2.4.2/go.mod h1:6gQvzNKRIqr4FS08XBfHpkpnxv9b7h58GLkSJ1zdK9A=
+github.com/sylabs/sif/v2 v2.6.0 h1:nrWbtSAavp4T6gETg/QgZXxs67qTpSNEgqs2H1y228w=
+github.com/sylabs/sif/v2 v2.6.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
 github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=


@@ -95,6 +95,13 @@ type ContainersConfig struct {
 	// Annotation to add to all containers
 	Annotations []string `toml:"annotations,omitempty"`

+	// BaseHostsFile is the path to a hosts file; the entries from this file
+	// are added to the container's hosts file. The special value "image" is
+	// allowed, which uses the /etc/hosts file from within the image, and "none",
+	// which uses no base file at all. If it is empty we should default
+	// to /etc/hosts.
+	BaseHostsFile string `toml:"base_hosts_file,omitempty"`
+
 	// Default way to create a cgroup namespace for the container
 	CgroupNS string `toml:"cgroupns,omitempty"`

@@ -136,6 +143,9 @@ type ContainersConfig struct {
 	// EnvHost Pass all host environment variables into the container.
 	EnvHost bool `toml:"env_host,omitempty"`

+	// HostContainersInternalIP is used to set a specific host.containers.internal IP.
+	HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"`
+
 	// HTTPProxy is the proxy environment variable list to apply to container process
 	HTTPProxy bool `toml:"http_proxy,omitempty"`
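The two new fields above are plain TOML-tagged struct members. As a minimal, hypothetical sketch (not the actual containers/common loader), this is how such tagged fields decode with github.com/BurntSushi/toml, the TOML library this config package builds on; the sample values are illustrative only:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// containersConfig mirrors just the two new fields for illustration.
type containersConfig struct {
	BaseHostsFile            string `toml:"base_hosts_file,omitempty"`
	HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"`
}

func main() {
	const data = `
base_hosts_file = "image"
host_containers_internal_ip = "10.88.0.1"
`
	var cfg containersConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}
	// Prints: image 10.88.0.1
	fmt.Println(cfg.BaseHostsFile, cfg.HostContainersInternalIP)
}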


@@ -26,6 +26,13 @@
 #
 #apparmor_profile = "container-default"

+# The hosts entries from the base hosts file are added to the container's hosts
+# file. This must be either an absolute path or one of the special values "image",
+# which uses the hosts file from the container image, or "none", which means
+# no base hosts file is used. The default is "", which will use /etc/hosts.
+#
+#base_hosts_file = ""
+
 # Default way to create a cgroup namespace for the container
 # Options are:
 # `private` Create private Cgroup Namespace for the container.
@@ -114,6 +121,16 @@ default_sysctls = [
 #
 #env_host = false

+# Set the IP for the host.containers.internal entry in the containers /etc/hosts
+# file. This can be set to "none" to disable adding this entry. By default it
+# will automatically choose the host IP.
+#
+# NOTE: When using podman machine this entry will never be added to the containers
+# hosts file; instead the gvproxy DNS resolver will resolve this hostname. Therefore
+# it is not possible to disable the entry in this case.
+#
+#host_containers_internal_ip = ""
+
 # Default proxy environment variables passed into the container.
 # The environment variables passed in include:
 # http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
@@ -464,9 +481,26 @@ default_sysctls = [
 #network_cmd_path = ""

 # Default options to pass to the slirp4netns binary.
-# For example "allow_host_loopback=true"
+# Valid options values are:
 #
-#network_cmd_options = ["enable_ipv6=true",]
+# - allow_host_loopback=true|false: Allow slirp4netns to reach the host loopback IP (`10.0.2.2`).
+#   Default is false.
+# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`.)
+# - cidr=CIDR: Specify the IP range to use for this network. (Default is `10.0.2.0/24`.)
+# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`.)
+# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (IPv4 traffic only).
+# - outbound_addr=IPv4: Specify the outbound IPv4 address slirp should bind to.
+# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (IPv6 traffic only).
+# - outbound_addr6=IPv6: Specify the outbound IPv6 address slirp should bind to.
+# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default.
+#   Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container
+#   network namespace, usually `10.0.2.100`. If your application requires the real source IP address,
+#   e.g. for web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for
+#   rootless containers when connected to user-defined networks.
+# - port_handler=slirp4netns: Use slirp4netns port forwarding; it is slower than rootlesskit but
+#   preserves the correct source IP address. This port handler cannot be used for user-defined networks.
+#
+#network_cmd_options = []

 # Whether to use chroot instead of pivot_root in the runtime
 #
@@ -644,4 +678,3 @@ default_sysctls = [
 # TOML does not provide a way to end a table other than a further table being
 # defined, so every key hereafter will be part of [machine] and not the
 # main config.
-


@@ -122,6 +122,8 @@ const (
 	CgroupfsCgroupsManager = "cgroupfs"
 	// DefaultApparmorProfile specifies the default apparmor profile for the container.
 	DefaultApparmorProfile = apparmor.Profile
+	// DefaultHostsFile is the default path to the hosts file.
+	DefaultHostsFile = "/etc/hosts"
 	// SystemdCgroupsManager represents systemd native cgroup manager
 	SystemdCgroupsManager = "systemd"
 	// DefaultLogSizeMax is the default value for the maximum log size

@@ -189,6 +191,7 @@ func DefaultConfig() (*Config, error) {
 			Volumes:             []string{},
 			Annotations:         []string{},
 			ApparmorProfile:     DefaultApparmorProfile,
+			BaseHostsFile:       "",
 			CgroupNS:            cgroupNS,
 			Cgroups:             "enabled",
 			DefaultCapabilities: DefaultCapabilities,

@@ -299,9 +302,6 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 	c.ServiceTimeout = uint(5)
 	c.StopTimeout = uint(10)
 	c.ExitCommandDelay = uint(5 * 60)
-	c.NetworkCmdOptions = []string{
-		"enable_ipv6=true",
-	}
 	c.Remote = isRemote()
 	c.OCIRuntimes = map[string][]string{
 		"crun": {


@@ -3,12 +3,12 @@ package shelldriver
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"os"
 	"os/exec"
 	"sort"
 	"strings"

-	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
 )

@@ -27,22 +27,33 @@ var (
 type driverConfig struct {
 	// DeleteCommand contains a shell command that deletes a secret.
 	// The secret id is provided as environment variable SECRET_ID
-	DeleteCommand string `mapstructure:"delete"`
+	DeleteCommand string
 	// ListCommand contains a shell command that lists all secrets.
 	// The output is expected to be one id per line
-	ListCommand string `mapstructure:"list"`
+	ListCommand string
 	// LookupCommand contains a shell command that retrieves a secret.
 	// The secret id is provided as environment variable SECRET_ID
-	LookupCommand string `mapstructure:"lookup"`
+	LookupCommand string
 	// StoreCommand contains a shell command that stores a secret.
 	// The secret id is provided as environment variable SECRET_ID
 	// The secret value itself is provided over stdin
-	StoreCommand string `mapstructure:"store"`
+	StoreCommand string
 }

 func (cfg *driverConfig) ParseOpts(opts map[string]string) error {
-	if err := mapstructure.Decode(opts, cfg); err != nil {
-		return err
+	for key, value := range opts {
+		switch key {
+		case "delete":
+			cfg.DeleteCommand = value
+		case "list":
+			cfg.ListCommand = value
+		case "lookup":
+			cfg.LookupCommand = value
+		case "store":
+			cfg.StoreCommand = value
+		default:
+			return fmt.Errorf("invalid shell driver option: %q", key)
+		}
 	}
 	if cfg.DeleteCommand == "" ||
 		cfg.ListCommand == "" ||
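The ParseOpts rewrite above swaps reflection-based mapstructure decoding for an explicit switch, which drops the mitchellh/mapstructure dependency and makes unknown keys fail loudly. A self-contained sketch of the same pattern (names simplified; not the vendored file itself):

package main

import "fmt"

type driverConfig struct {
	DeleteCommand, ListCommand, LookupCommand, StoreCommand string
}

// parseOpts maps well-known option keys onto struct fields and rejects the rest.
func (cfg *driverConfig) parseOpts(opts map[string]string) error {
	for key, value := range opts {
		switch key {
		case "delete":
			cfg.DeleteCommand = value
		case "list":
			cfg.ListCommand = value
		case "lookup":
			cfg.LookupCommand = value
		case "store":
			cfg.StoreCommand = value
		default:
			return fmt.Errorf("invalid shell driver option: %q", key)
		}
	}
	return nil
}

func main() {
	var cfg driverConfig
	err := cfg.parseOpts(map[string]string{"list": "ls /secrets", "bogus": "x"})
	fmt.Println(err) // invalid shell driver option: "bogus"
}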


@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"reflect"
 	"strings"

@@ -199,7 +198,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		return nil, err
 	}

-	reportWriter := ioutil.Discard
+	reportWriter := io.Discard

 	if options.ReportWriter != nil {
 		reportWriter = options.ReportWriter

@@ -232,7 +231,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	// createProgressBar() will print a single line instead.
 	progressOutput := reportWriter
 	if !isTTY(reportWriter) {
-		progressOutput = ioutil.Discard
+		progressOutput = io.Discard
 	}

 	c := &copier{

@@ -1091,7 +1090,7 @@ func customPartialBlobDecorFunc(s decor.Statistics) string {
 }

 // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
-// is ioutil.Discard, the progress bar's output will be discarded
+// is io.Discard, the progress bar's output will be discarded
 // NOTE: Every progress bar created within a progress pool must either successfully
 // complete or be aborted, or pool.Wait() will hang. That is typically done
 // using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.

@@ -1143,7 +1142,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
 			),
 		)
 	}
-	if c.progressOutput == ioutil.Discard {
+	if c.progressOutput == io.Discard {
 		c.Printf("Copying %s %s\n", kind, info.Digest)
 	}
 	return bar

@@ -1669,7 +1668,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	// sent there if we are not already at EOF.
 	if getOriginalLayerCopyWriter != nil {
 		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
-		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		_, err := io.Copy(io.Discard, originalLayerReader)
 		if err != nil {
 			return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
 		}
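This file and most of the remaining hunks in this commit apply the same mechanical Go 1.16 migration off the deprecated io/ioutil package. A runnable reference sketch of the substitutions involved (all standard library; the file names here are illustrative only):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// was ioutil.TempFile / ioutil.TempDir
	f, err := os.CreateTemp("", "blob-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	dir, err := os.MkdirTemp("", "oci-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// was ioutil.WriteFile / ioutil.ReadFile
	if err := os.WriteFile(f.Name(), []byte("hello"), 0644); err != nil {
		panic(err)
	}
	data, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}

	// was ioutil.ReadDir; note it now returns []os.DirEntry, not []os.FileInfo
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}

	// was ioutil.NopCloser / ioutil.Discard / ioutil.ReadAll
	rc := io.NopCloser(strings.NewReader(string(data)))
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	b, _ := io.ReadAll(strings.NewReader("x"))

	fmt.Println(len(entries), n, len(b))
}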


@@ -3,7 +3,6 @@ package directory
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"

@@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
 		return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
 	}
 	if versionExists {
-		contents, err := ioutil.ReadFile(d.ref.versionPath())
+		contents, err := os.ReadFile(d.ref.versionPath())
 		if err != nil {
 			return nil, err
 		}

@@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
 		}
 	}
 	// create version file
-	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
 	if err != nil {
 		return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
 	}

@@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+	blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}

@@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
 // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
 func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
-	return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
 }

@@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte,
 // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
 func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
 	for i, sig := range signatures {
-		if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
 			return err
 		}
 	}

@@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) {
 // returns true if directory is empty
 func isDirEmpty(path string) (bool, error) {
-	files, err := ioutil.ReadDir(path)
+	files, err := os.ReadDir(path)
 	if err != nil {
 		return false, err
 	}

@@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) {
 // deletes the contents of a directory
 func removeDirContents(path string) error {
-	files, err := ioutil.ReadDir(path)
+	files, err := os.ReadDir(path)
 	if err != nil {
 		return err
 	}


@@ -3,7 +3,6 @@ package directory
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"

 	"github.com/containers/image/v5/manifest"

@@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error {
 // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-	m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
+	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
 	if err != nil {
 		return nil, "", err
 	}

@@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
 func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
 	signatures := [][]byte{}
 	for i := 0; ; i++ {
-		signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+		signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest))
 		if err != nil {
 			if os.IsNotExist(err) {
 				break


@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"

@@ -654,7 +653,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("refresh_token", c.auth.IdentityToken)
 	params.Add("client_id", "containers/image")

-	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+	authReq.Body = io.NopCloser(bytes.NewBufferString(params.Encode()))
 	authReq.Header.Add("User-Agent", c.userAgent)
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())


@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"

@@ -592,7 +591,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 		if err != nil {
 			return err
 		}
-		err = ioutil.WriteFile(url.Path, signature, 0644)
+		err = os.WriteFile(url.Path, signature, 0644)
 		if err != nil {
 			return err
 		}


@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/http"

@@ -308,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 			break
 		}
 		toSkip := c.Offset - currentOffset
-		if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+		if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
 			errs <- err
 			break
 		}

@@ -316,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 		}
 		s := signalCloseReader{
 			closed:        make(chan interface{}),
-			stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+			stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
 			consumeStream: true,
 		}
 		streams <- s

@@ -515,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Reading %s", url.Path)
-		sig, err := ioutil.ReadFile(url.Path)
+		sig, err := os.ReadFile(url.Path)
 		if err != nil {
 			if os.IsNotExist(err) {
 				return nil, true, nil

@@ -765,7 +764,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
 func (s signalCloseReader) Close() error {
 	defer close(s.closed)
 	if s.consumeStream {
-		if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+		if _, err := io.Copy(io.Discard, s.stream); err != nil {
 			s.stream.Close()
 			return err
 		}


@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"

@@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 // The caller should call .Close() on the returned archive when done.
 func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
 	// Save inputStream to a temporary file
-	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
 		return nil, errors.Wrap(err, "creating temporary file")
 	}


@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"sync"

@@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif

 			uncompressedSize := h.Size
 			if isCompressed {
-				uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+				uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 				if err != nil {
 					return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
 				}

@@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 	}

 	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+		return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
 	}

 	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,


@@ -2,7 +2,6 @@ package docker
 import (
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"path"

@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 			continue
 		}
 		configPath := filepath.Join(dirPath, configName)
-		configBytes, err := ioutil.ReadFile(configPath)
+		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
 			return nil, err
 		}


@@ -2,7 +2,6 @@ package iolimits
 import (
 	"io"
-	"io/ioutil"

 	"github.com/pkg/errors"
 )

@@ -47,7 +46,7 @@ const (
 func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
 	limitedReader := io.LimitReader(reader, int64(limit+1))

-	res, err := ioutil.ReadAll(limitedReader)
+	res, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return nil, err
 	}


@@ -3,7 +3,6 @@ package streamdigest
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"

 	"github.com/containers/image/v5/internal/putblobdigest"

@@ -16,7 +15,7 @@ import (
 // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
 // If an error occurs, inputInfo is not modified.
 func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
-	diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+	diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
 	if err != nil {
 		return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
 	}


@@ -3,7 +3,6 @@ package archive
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"

@@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
 // createOCIRef creates the oci reference of the image
 // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
 func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
-	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+	dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
 	if err != nil {
 		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
 	}


@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"

@@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}

@@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+	if err := os.WriteFile(blobPath, m, 0644); err != nil {
 		return err
 	}

@@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
-	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+	if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
 		return err
 	}
 	indexJSON, err := json.Marshal(d.index)
 	if err != nil {
 		return err
 	}
-	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }

 func ensureDirectoryExists(path string) error {


@@ -3,7 +3,6 @@ package layout
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"

@@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 		return nil, "", err
 	}

-	m, err := ioutil.ReadFile(manifestPath)
+	m, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return nil, "", err
 	}


@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"

@@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
 // LoadFromFile takes a filename and deserializes the contents into Config object
 func loadFromFile(filename string) (*clientcmdConfig, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	kubeconfigBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}

@@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
 		return data, nil
 	}
 	if len(file) > 0 {
-		fileData, err := ioutil.ReadFile(file)
+		fileData, err := os.ReadFile(file)
 		if err != nil {
 			return []byte{}, err
 		}


@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"

@@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+	tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}

@@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 }

 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
-	entries, err := ioutil.ReadDir(dir)
+	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return err
 	}

-	for _, info := range entries {
-		fullpath := filepath.Join(dir, info.Name())
-		if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+	for _, entry := range entries {
+		fullpath := filepath.Join(dir, entry.Name())
+		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
 			if err := os.Remove(fullpath); err != nil {
 				return err
 			}
 			continue
 		}

+		info, err := entry.Info()
+		if err != nil {
+			return err
+		}
 		if selinuxHnd != nil {
 			relPath, err := filepath.Rel(root, fullpath)
 			if err != nil {

@@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			}
 		}

-		if info.IsDir() {
+		if entry.IsDir() {
 			if usermode {
 				if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
 					return err

@@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			if err != nil {
 				return err
 			}
-		} else if usermode && (info.Mode().IsRegular()) {
+		} else if usermode && (entry.Type().IsRegular()) {
 			if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
 				return err
 			}

@@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
 	}
 	d.digest = digest

-	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+	return os.WriteFile(manifestPath, manifestBlob, 0644)
 }

@@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
 	for i, sig := range signatures {
 		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
-		if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+		if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
 			return err
 		}
 	}
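The fixFiles rewrite above is where the switch from ioutil.ReadDir to os.ReadDir actually changes behavior: os.ReadDir returns fs.DirEntry values, so the cheap Type() bits answer type questions without a stat, and entry.Info() is called only once full permission bits are needed. A small standalone sketch of that pattern (hypothetical helper, not the vendored function):

package main

import (
	"os"
	"path/filepath"
)

// chmodRegularFiles adds owner read/write to every regular file in dir.
func chmodRegularFiles(dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if !entry.Type().IsRegular() { // type bits only; no extra stat needed
			continue
		}
		info, err := entry.Info() // stat: required for the current mode bits
		if err != nil {
			return err
		}
		if err := os.Chmod(filepath.Join(dir, entry.Name()), info.Mode()|0600); err != nil {
			return err
		}
	}
	return nil
}

func main() { _ = chmodRegularFiles("/tmp") }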


@@ -9,7 +9,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strconv"
 	"strings"
 	"unsafe"

@@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
 	}
 	defer sigReader.Close()

-	sig, err := ioutil.ReadAll(sigReader)
+	sig, err := io.ReadAll(sigReader)
 	if err != nil {
 		return nil, err
 	}


@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"

@@ -196,7 +195,7 @@ func (s *blobCacheSource) Close() error {
 func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
 	if instanceDigest != nil {
 		filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
-		manifestBytes, err := ioutil.ReadFile(filename)
+		manifestBytes, err := os.ReadFile(filename)
 		if err == nil {
 			s.cacheHits++
 			return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil

@@ -280,10 +279,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
 			switch s.reference.compress {
 			case types.Compress:
 				alternate = blobFile + compressedNote
-				replaceDigest, err = ioutil.ReadFile(alternate)
+				replaceDigest, err = os.ReadFile(alternate)
 			case types.Decompress:
 				alternate = blobFile + decompressedNote
-				replaceDigest, err = ioutil.ReadFile(alternate)
+				replaceDigest, err = os.ReadFile(alternate)
 			}
 			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
 				alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))

@@ -373,7 +372,7 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
 		_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
 	} else {
 		// Drain the pipe to keep from stalling the PutBlob() thread.
-		if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil {
+		if _, err := io.Copy(io.Discard, decompressReader); err != nil {
 			logrus.Debugf("error draining the pipe: %v", err)
 		}
 	}

@@ -423,7 +422,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
 	compression := archive.Uncompressed
 	if inputInfo.Digest != "" {
 		filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
-		tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+		tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
 		if err == nil {
 			stream = io.TeeReader(stream, tempfile)
 			defer func() {

@@ -457,7 +456,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
 		if compression == archive.Gzip {
 			// The stream is compressed, so create a file which we'll
 			// use to store a decompressed copy.
-			decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+			decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
 			if err2 != nil {
 				logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
 				decompressedTemp.Close()


@@ -5,7 +5,6 @@ import (
 	"compress/bzip2"
 	"fmt"
 	"io"
-	"io/ioutil"

 	"github.com/containers/image/v5/pkg/compression/internal"
 	"github.com/containers/image/v5/pkg/compression/types"

@@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
 // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
 func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
-	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+	return io.NopCloser(bzip2.NewReader(r)), nil
 }

 // XzDecompressor is a DecompressorFunc for the xz compression algorithm.

@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return ioutil.NopCloser(r), nil
+	return io.NopCloser(r), nil
 }

 // gzipCompressor is a CompressorFunc for the gzip compression algorithm.

@@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
 			return nil, false, errors.Wrapf(err, "initializing decompression")
 		}
 	} else {
-		res = ioutil.NopCloser(stream)
+		res = io.NopCloser(stream)
 	}
 	return res, decompressor != nil, nil
 }


@@ -4,7 +4,6 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"

@@ -544,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
 func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 	var auths dockerConfigFile

-	raw, err := ioutil.ReadFile(path)
+	raw, err := os.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			auths.AuthConfigs = map[string]dockerAuthConfig{}


@@ -2,6 +2,7 @@ package sysregistriesv2
 import (
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"reflect"

@@ -643,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
 	}
 	for _, dirPath := range dirPaths {
-		err := filepath.Walk(dirPath,
+		err := filepath.WalkDir(dirPath,
 			// WalkFunc to read additional configs
-			func(path string, info os.FileInfo, err error) error {
+			func(path string, d fs.DirEntry, err error) error {
 				switch {
 				case err != nil:
 					// return error (could be a permission problem)
 					return err
-				case info == nil:
+				case d == nil:
 					// this should only happen when err != nil but let's be sure
 					return nil
-				case info.IsDir():
+				case d.IsDir():
 					if path != dirPath {
 						// make sure to not recurse into sub-directories
 						return filepath.SkipDir
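The same fs.DirEntry idea drives the filepath.Walk to filepath.WalkDir swap above: WalkDir hands the callback a DirEntry and skips the per-entry os.Lstat that Walk performs. A self-contained sketch following the same skip-subdirectory convention (the directory path is illustrative):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// listTopLevel prints the files directly under dirPath without recursing.
func listTopLevel(dirPath string) error {
	return filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
		switch {
		case err != nil:
			return err // e.g. a permission problem
		case d.IsDir():
			if path != dirPath {
				return filepath.SkipDir // do not recurse into sub-directories
			}
			return nil
		default:
			fmt.Println(path)
			return nil
		}
	})
}

func main() { _ = listTopLevel("/etc/containers/registries.conf.d") }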


@@ -2,7 +2,6 @@ package tlsclientconfig
 import (
    "crypto/tls"
-   "io/ioutil"
    "net"
    "net/http"
    "os"
@@ -19,7 +18,7 @@ import (
 // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
 func SetupCertificates(dir string, tlsc *tls.Config) error {
    logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
-   fs, err := ioutil.ReadDir(dir)
+   fs, err := os.ReadDir(dir)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
@@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
        fullPath := filepath.Join(dir, f.Name())
        if strings.HasSuffix(f.Name(), ".crt") {
            logrus.Debugf(" crt: %s", fullPath)
-           data, err := ioutil.ReadFile(fullPath)
+           data, err := os.ReadFile(fullPath)
            if err != nil {
                if os.IsNotExist(err) {
                    // Dangling symbolic link?
@@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
    return nil
 }
-func hasFile(files []os.FileInfo, name string) bool {
+func hasFile(files []os.DirEntry, name string) bool {
    for _, f := range files {
        if f.Name() == name {
            return true
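`os.ReadDir` similarly replaces `ioutil.ReadDir`, returning a sorted `[]os.DirEntry` rather than `[]os.FileInfo`, which is why `hasFile` changes its parameter type. When the full `FileInfo` is still needed, `DirEntry.Info()` performs the stat lazily. A small sketch:

```go
package main

import (
	"fmt"
	"os"
)

// hasFile mirrors the updated helper above, matching on os.DirEntry.
func hasFile(files []os.DirEntry, name string) bool {
	for _, f := range files {
		if f.Name() == name {
			return true
		}
	}
	return false
}

func main() {
	entries, err := os.ReadDir(".") // replaces ioutil.ReadDir
	if err != nil {
		panic(err)
	}
	fmt.Println(hasFile(entries, "go.mod"))

	// DirEntry.Info() stats the file only when the extra detail is needed.
	for _, e := range entries {
		if info, err := e.Info(); err == nil {
			fmt.Println(e.Name(), info.Size())
			break
		}
	}
}
```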
@@ -5,7 +5,6 @@ import (
    "context"
    "fmt"
    "io"
-   "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
@@ -103,7 +102,7 @@ func writeInjectedScript(extractedRootPath string, injectedScript []byte) error
    if err := os.MkdirAll(parentDirPath, 0755); err != nil {
        return fmt.Errorf("creating %s: %w", parentDirPath, err)
    }
-   if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil {
+   if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
        return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
    }
    return nil
@@ -121,7 +120,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
    conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
        extractedRootPath, squashFSPath, extractedRootPath, tarPath)
    script := "#!/bin/sh\n" + conversionCommand + "\n"
-   if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+   if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
        return err
    }
    defer os.Remove(scriptPath)
@@ -149,7 +148,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
 // at start, and is exclusively used by the current process (i.e. it is safe
 // to use hard-coded relative paths within it).
 func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
-   // We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive,
+   // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
    // so we can just hard-code a set of unique values here.
    // We create and/or manage cleanup of these two paths.
    squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
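`os.MkdirTemp` and `os.CreateTemp` are the Go 1.16 replacements for `ioutil.TempDir` and `ioutil.TempFile`. The rewritten comment above leans on the same property this sketch demonstrates: once a process owns an exclusive temporary directory, fixed names inside it are safe (all names below are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// MkdirTemp creates a uniquely named directory; the caller owns cleanup.
	dir, err := os.MkdirTemp("", "sif-example-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// dir is exclusive to this process, so a hard-coded name is fine here.
	script := filepath.Join(dir, "script")
	if err := os.WriteFile(script, []byte("#!/bin/sh\necho hello\n"), 0755); err != nil {
		panic(err)
	}
	fmt.Println("wrote", script)
}
```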
@@ -7,7 +7,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "os"

    "github.com/containers/image/v5/internal/tmpdir"
@@ -65,7 +64,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
        _ = sifImg.UnloadContainer()
    }()
-   workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
+   workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
    if err != nil {
        return nil, fmt.Errorf("creating temp directory: %w", err)
    }
@@ -170,7 +169,7 @@ func (s *sifImageSource) HasThreadSafeGetBlob() bool {
 func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    switch info.Digest {
    case s.configDigest:
-       return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+       return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
    case s.layerDigest:
        reader, err := os.Open(s.layerFile)
        if err != nil {
@@ -6,7 +6,7 @@ import (
    "bytes"
    "errors"
    "fmt"
-   "io/ioutil"
+   "io"
    "strings"

    // This code is used only to parse the data in an explicitly-untrusted
@@ -82,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
    if !md.IsSigned {
        return nil, "", errors.New("The input is not a signature")
    }
-   content, err := ioutil.ReadAll(md.UnverifiedBody)
+   content, err := io.ReadAll(md.UnverifiedBody)
    if err != nil {
        // Coverage: An error during reading the body can happen only if
        // 1) the message is encrypted, which is not our case (and we don't give ReadMessage the key
@@ -7,7 +7,6 @@ import (
    "bytes"
    "errors"
    "fmt"
-   "io/ioutil"
    "os"

    "github.com/proglottis/gpgme"
@@ -37,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
 // of these keys.
 // The caller must call .Close() on the returned SigningMechanism.
 func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
-   dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+   dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
    if err != nil {
        return nil, nil, err
    }
@@ -7,7 +7,7 @@ import (
    "bytes"
    "errors"
    "fmt"
-   "io/ioutil"
+   "io"
    "os"
    "path"
    "strings"
@@ -44,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
        }
    }
-   pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+   pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
    if err != nil {
        if !os.IsNotExist(err) {
            return nil, err
@@ -130,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
    if !md.IsSigned {
        return nil, "", errors.New("not signed")
    }
-   content, err := ioutil.ReadAll(md.UnverifiedBody)
+   content, err := io.ReadAll(md.UnverifiedBody)
    if err != nil {
        // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
        // (and possibly also signed, but it _must_ be encrypted) and the signing
@@ -16,7 +16,6 @@ package signature
 import (
    "encoding/json"
    "fmt"
-   "io/ioutil"
    "os"
    "path/filepath"
    "regexp"
@@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
 // NewPolicyFromFile returns a policy configured in the specified file.
 func NewPolicyFromFile(fileName string) (*Policy, error) {
-   contents, err := ioutil.ReadFile(fileName)
+   contents, err := os.ReadFile(fileName)
    if err != nil {
        return nil, err
    }
@@ -5,7 +5,7 @@ package signature
 import (
    "context"
    "fmt"
-   "io/ioutil"
+   "os"
    "strings"

    "github.com/containers/image/v5/manifest"
@@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
    if pr.KeyData != nil {
        data = pr.KeyData
    } else {
-       d, err := ioutil.ReadFile(pr.KeyPath)
+       d, err := os.ReadFile(pr.KeyPath)
        if err != nil {
            return sarRejected, nil, err
        }
@@ -10,7 +10,6 @@ import (
    stderrors "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "os"
    "path/filepath"
    "sync"
@@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool {
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
    if info.Digest == image.GzippedEmptyLayerDigest {
-       return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+       return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
    }

    // NOTE: the blob is first written to a temporary file and subsequently
@@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
    }
    defer rc.Close()
-   tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+   tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
    if err != nil {
        return nil, 0, err
    }
@@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
        }
        r := bytes.NewReader(b)
        logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-       return ioutil.NopCloser(r), int64(r.Len()), "", nil
+       return io.NopCloser(r), int64(r.Len()), "", nil
    }
    // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
    // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
@@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *
 // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
 // it's time to Commit() the image
 func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
-   directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+   directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
    if err != nil {
        return nil, errors.Wrapf(err, "creating a temporary directory")
    }
@@ -791,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
    }
    // Assume it's a file, since we're only calling this from a place that expects to read files.
    if filename, ok := s.filenames[info.Digest]; ok {
-       contents, err2 := ioutil.ReadFile(filename)
+       contents, err2 := os.ReadFile(filename)
        if err2 != nil {
            return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
        }
@@ -1136,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
        delete(dataBlobs, layerBlob.Digest)
    }
    for blob := range dataBlobs {
-       v, err := ioutil.ReadFile(s.filenames[blob])
+       v, err := os.ReadFile(s.filenames[blob])
        if err != nil {
            return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
        }
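The `GetBlob` path above spools each blob to a temporary file before handing it back, so callers get a re-readable stream with a known size. A reduced sketch of that pattern with `os.CreateTemp` (the `spoolBlob` helper is hypothetical, not part of the vendored API):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// spoolBlob is a hypothetical helper: it copies a stream into a temp file
// so the data can be re-read (and its size known) after the source is gone.
func spoolBlob(r io.Reader, dir string) (string, int64, error) {
	tmpFile, err := os.CreateTemp(dir, "") // replaces ioutil.TempFile
	if err != nil {
		return "", 0, err
	}
	defer tmpFile.Close()
	n, err := io.Copy(tmpFile, r)
	if err != nil {
		os.Remove(tmpFile.Name())
		return "", 0, err
	}
	return tmpFile.Name(), n, nil
}

func main() {
	name, n, err := spoolBlob(strings.NewReader("layer bytes"), os.TempDir())
	if err != nil {
		panic(err)
	}
	defer os.Remove(name)
	fmt.Printf("spooled %d bytes to %s\n", n, name)
}
```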
@@ -6,7 +6,6 @@ import (
    "encoding/json"
    "fmt"
    "io"
-   "io/ioutil"
    "os"
    "runtime"
    "strings"
@@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
            uncompressed = nil
        }
        // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-       n, err := io.Copy(ioutil.Discard, reader)
+       n, err := io.Copy(io.Discard, reader)
        if err != nil {
            return nil, fmt.Errorf("error reading %q: %v", filename, err)
        }
@@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
 func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    // We should only be asked about things in the manifest. Maybe the configuration blob.
    if blobinfo.Digest == is.configID {
-       return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
+       return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
    }
    // Maybe one of the layer blobs.
    for i := range is.blobIDs {
        if blobinfo.Digest == is.blobIDs[i] {
            // We want to read that layer: open the file or memory block and hand it back.
            if is.filenames[i] == "-" {
-               return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
+               return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
            }
            reader, err := os.Open(is.filenames[i])
            if err != nil {
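`io.Discard` replaces `ioutil.Discard`; copying into it, as the TODO-marked line above does, is the idiomatic way to measure a stream's length without buffering it:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// The write side is a black hole, so only the byte count survives.
	n, err := io.Copy(io.Discard, strings.NewReader("some layer data"))
	if err != nil {
		panic(err)
	}
	fmt.Println("stream length:", n)
}
```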
@@ -3,7 +3,7 @@ package tarball
 import (
    "errors"
    "fmt"
-   "io/ioutil"
+   "io"
    "os"
    "strings"
@@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
    filenames := strings.Split(reference, separator)
    for _, filename := range filenames {
        if filename == "-" {
-           stdin, err = ioutil.ReadAll(os.Stdin)
+           stdin, err = io.ReadAll(os.Stdin)
            if err != nil {
                return nil, fmt.Errorf("error buffering stdin: %v", err)
            }
@@ -69,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and freebsd
    done

 cross: ## cross build the binaries for arm, darwin, and freebsd using VMs
-   $(RUNINVM) make local-$@
+   $(RUNINVM) $(MAKE) local-$@

 docs: install.tools ## build the docs on the host
    $(MAKE) -C docs docs

 gccgo: ## build using gccgo using VMs
-   $(RUNINVM) make local-$@
+   $(RUNINVM) $(MAKE) local-$@

 test: local-binary ## build the binaries and run the tests using VMs
-   $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration
+   $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration

 local-test-unit: local-binary ## run the unit tests on the host (requires superuser privileges)
    @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)

 test-unit: local-binary ## run the unit tests using VMs
-   $(RUNINVM) make local-$@
+   $(RUNINVM) $(MAKE) local-$@

 local-test-integration: local-binary ## run the integration tests on the host (requires superuser privileges)
    @cd tests; ./test_runner.bash

 test-integration: local-binary ## run the integration tests using VMs
-   $(RUNINVM) make local-$@
+   $(RUNINVM) $(MAKE) local-$@

 local-validate: ## validate DCO and gofmt on the host
    @./hack/git-validation.sh
    @./hack/gofmt.sh

 validate: ## validate DCO, gofmt, ./pkg/ isolation, golint, go vet and vendor using VMs
-   $(RUNINVM) make local-$@
+   $(RUNINVM) $(MAKE) local-$@

 install.tools:
-   make -C tests/tools
+   $(MAKE) -C tests/tools

 $(FFJSON):
-   make -C tests/tools
+   $(MAKE) -C tests/tools

 install.docs: docs
-   make -C docs install
+   $(MAKE) -C docs install

 install: install.docs
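The Makefile hunks all make the same substitution: recursive invocations use `$(MAKE)` instead of a literal `make`. `$(MAKE)` is the GNU make convention for sub-makes because it propagates the parent's flags and jobserver (so `make -j` parallelism reaches the sub-build) and still works when make was invoked under a different name or path.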
@@ -1,16 +1,45 @@
 package graphdriver

 import (
+   "fmt"

    "golang.org/x/sys/unix"
+
+   "github.com/containers/storage/pkg/mount"
+)
+
+const (
+   // FsMagicZfs filesystem id for Zfs
+   FsMagicZfs = FsMagic(0x2fc12fc1)
 )

 var (
    // Slice of drivers that should be used in an order
    priority = []string{
        "zfs",
+       "vfs",
+   }
+
+   // FsNames maps filesystem id to name of the filesystem.
+   FsNames = map[FsMagic]string{
+       FsMagicZfs: "zfs",
    }
 )

+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on FreeBSD.
+func NewDefaultChecker() Checker {
+   return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+   m, _ := mount.Mounted(path)
+   return m
+}
+
 // Mounted checks if the given path is mounted as the fs type
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {
    var buf unix.Statfs_t
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris

 package register
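In `// +build` constraints, space-separated terms are ORed while comma-separated elements within a term are ANDed, so the stray comma in the old line corrupted the freebsd term instead of adding `solaris` as a third alternative. The fixed constraint reads "(zfs not excluded AND linux) OR (zfs not excluded AND freebsd) OR solaris". In the `//go:build` syntax introduced in Go 1.17 (shown here only for comparison; the vendored file keeps the old style) the same constraint would be:

```go
//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris

package register
```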
@@ -344,7 +344,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
        return errors.Wrap(err, "error creating zfs mount")
    }
    defer func() {
-       if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+       if err := detachUnmount(mountpoint); err != nil {
            logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
        }
    }()
@@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error {
    logger.Debugf(`unmount("%s")`, mountpoint)
-   if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+   if err := detachUnmount(mountpoint); err != nil {
        logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
    }
    if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
@@ -37,3 +37,8 @@ func getMountpoint(id string) string {
    return id[:maxlen]
 }
+
+func detachUnmount(mountpoint string) error {
+   // FreeBSD doesn't have an equivalent to MNT_DETACH
+   return unix.Unmount(mountpoint, 0)
+}
@@ -4,6 +4,7 @@ import (
    graphdriver "github.com/containers/storage/drivers"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
+   "golang.org/x/sys/unix"
 )

 func checkRootdirFs(rootDir string) error {
@@ -27,3 +28,7 @@ func checkRootdirFs(rootDir string) error {
 func getMountpoint(id string) string {
    return id
 }
+
+func detachUnmount(mountpoint string) error {
+   return unix.Unmount(mountpoint, unix.MNT_DETACH)
+}
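The two `detachUnmount` definitions above live in per-platform files, so the shared zfs driver code (see the earlier hunks) calls one name and gets a lazy `MNT_DETACH` unmount only where the kernel supports it. A minimal sketch of the same file-pair pattern, using the modern `//go:build` constraints (file and package names are illustrative):

```go
// umount_linux.go
//go:build linux

package zfsdemo

import "golang.org/x/sys/unix"

// detachUnmount detaches the mount point immediately; the kernel finishes
// the unmount once the filesystem is no longer busy (lazy unmount).
func detachUnmount(mountpoint string) error {
	return unix.Unmount(mountpoint, unix.MNT_DETACH)
}
```

```go
// umount_freebsd.go
//go:build freebsd

package zfsdemo

import "golang.org/x/sys/unix"

// FreeBSD has no MNT_DETACH equivalent, so fall back to a plain unmount.
func detachUnmount(mountpoint string) error {
	return unix.Unmount(mountpoint, 0)
}
```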
@@ -683,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
        r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
    }
    if layer.UncompressedDigest != "" {
-       r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
+       r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
    }
    if err := r.Save(); err != nil {
        r.driver.Remove(id)
@@ -866,6 +866,14 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
            return nil, -1, err
        }
        delete(layer.Flags, incompleteFlag)
+   } else {
+       // applyDiffWithOptions in the `diff != nil` case handles this bit for us
+       if layer.CompressedDigest != "" {
+           r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+       }
+       if layer.UncompressedDigest != "" {
+           r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+       }
    }
    err = r.Save()
    if err != nil {
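The first hunk fixes an index corruption: layers with an uncompressed digest were being recorded in `byuncompressedsum` under their *compressed* digest, so later lookups by uncompressed digest would silently miss them. A reduced sketch of the invariant being restored (types simplified to strings; names are illustrative):

```go
package main

import "fmt"

type layer struct {
	ID                 string
	CompressedDigest   string
	UncompressedDigest string
}

func main() {
	bycompressedsum := map[string][]string{}
	byuncompressedsum := map[string][]string{}

	l := layer{ID: "layer1", CompressedDigest: "sha256:aaa", UncompressedDigest: "sha256:bbb"}

	// Each index must be keyed by its own digest kind; mixing them up
	// (the old code used CompressedDigest for both) makes lookups by
	// uncompressed digest fail.
	if l.CompressedDigest != "" {
		bycompressedsum[l.CompressedDigest] = append(bycompressedsum[l.CompressedDigest], l.ID)
	}
	if l.UncompressedDigest != "" {
		byuncompressedsum[l.UncompressedDigest] = append(byuncompressedsum[l.UncompressedDigest], l.ID)
	}

	fmt.Println(byuncompressedsum["sha256:bbb"]) // [layer1]
}
```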
@@ -0,0 +1,205 @@
# This file is the configuration file for all tools
# that use the containers/storage library. The storage.conf file
# overrides all other storage.conf files. Container engines using the
# containers/storage library do not inherit fields from other storage.conf
# files.
#
# Note: The storage.conf file overrides other storage.conf files based on this precedence:
# /usr/containers/storage.conf
# /etc/containers/storage.conf
# $HOME/.config/containers/storage.conf
# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
# See man 5 containers-storage.conf for more information
# The "container storage" table contains all of the server options.
[storage]
# Default Storage Driver. Must be set for proper operation.
driver = "zfs"
# Temporary storage location
runroot = "/var/run/containers/storage"
# Primary Read/Write location of container storage
graphroot = "/var/db/containers/storage"
# Storage path for rootless users
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
[storage.options]
# Storage options to be passed to underlying storage drivers
# AdditionalImageStores is used to pass paths to additional Read/Only image stores
# Must be a comma-separated list.
additionalimagestores = [
]
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
# remap-uids = 0:1668442479:65536
# remap-gids = 0:1668442479:65536
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
# with an in-container ID of 0 and then a host-level ID taken from the lowest
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
# until all of the entries have been used for maps.
#
# remap-user = "containers"
# remap-group = "containers"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to create automatically a user namespace. Containers
# configured to automatically create a user namespace can still overlap with containers
# having an explicit mapping set.
# This setting is ignored when running as rootless.
# root-auto-userns-user = "storage"
#
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
# auto-userns-min-size=1024
#
# Auto-userns-max-size is the maximum size for a user namespace created automatically.
# auto-userns-max-size=65536
[storage.options.overlay]
# ignore_chown_errors can be set to allow a non-privileged user running with
# a single UID within a user namespace to run containers. The user can pull
# and use any image, even those with multiple UIDs. Note multiple UIDs will be
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
#ignore_chown_errors = "false"
# Inodes is used to set a maximum inodes of the container image.
# inodes = ""
# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"
# Size is used to set a maximum size of the container image.
# size = ""
# ForceMask specifies the permissions mask that is used for new files and
# directories.
#
# The values "shared" and "private" are accepted.
# Octal permission masks are also accepted.
#
# "": No value specified.
# All files/directories get set with the permissions identified within the
# image.
# "private": it is equivalent to 0700.
# All files/directories get set with 0700 permissions. The owner has rwx
# access to the files. No other users on the system can access the files.
# This setting could be used with networked based homedirs.
# "shared": it is equivalent to 0755.
# The owner has rwx access to the files and everyone else can read, access
# and execute them. This setting is useful for sharing containers storage
# with other users. For instance, a store owned by root can be shared
# with rootless users as an additional store.
# NOTE: All files within the image are made readable and executable by any
# user on the system. Even /etc/shadow within your image is now readable by
# any user.
#
# OCTAL: Users can experiment with other OCTAL Permissions.
#
# Note: The force_mask Flag is an experimental feature, it could change in the
# future. When "force_mask" is set the original permission mask is stored in
# the "user.containers.override_stat" xattr and the "mount_program" option must
# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
# extended attribute permissions to processes within containers rather than the
# "force_mask" permissions.
#
# force_mask = ""
[storage.options.thinpool]
# Storage Options for thinpool
# autoextend_percent determines the amount by which pool needs to be
# grown. This is specified in terms of % of pool size. So a value of 20 means
# that when threshold is hit, pool will be grown by 20% of existing
# pool size.
# autoextend_percent = "20"
# autoextend_threshold determines the pool extension threshold in terms
# of percentage of pool size. For example, if threshold is 60, that means when
# pool is 60% full, threshold has been hit.
# autoextend_threshold = "80"
# basesize specifies the size to use when creating the base device, which
# limits the size of images and containers.
# basesize = "10G"
# blocksize specifies a custom blocksize to use for the thin pool.
# blocksize="64k"
# directlvm_device specifies a custom block storage device to use for the
# thin pool. Required if you setup devicemapper.
# directlvm_device = ""
# directlvm_device_force wipes device even if device already has a filesystem.
# directlvm_device_force = "True"
# fs specifies the filesystem type to use for the base device.
# fs="xfs"
# log_level sets the log level of devicemapper.
# 0: LogLevelSuppress 0 (Default)
# 2: LogLevelFatal
# 3: LogLevelErr
# 4: LogLevelWarn
# 5: LogLevelNotice
# 6: LogLevelInfo
# 7: LogLevelDebug
# log_level = "7"
# min_free_space specifies the minimum free space percent in a thin pool required for
# new device creation to succeed. Valid values are from 0% - 99%.
# A value of 0% disables the check.
# min_free_space = "10%"
# mkfsarg specifies extra mkfs arguments to be used when creating the base
# device.
# mkfsarg = ""
# metadata_size is used to set the `pvcreate --metadatasize` options when
# creating thin devices. Default is 128k
# metadata_size = ""
# Size is used to set a maximum size of the container image.
# size = ""
# use_deferred_removal marks devicemapper block device for deferred removal.
# If the thinpool is in use when the driver attempts to remove it, the driver
# tells the kernel to remove it as soon as possible. Note this does not free
# up the disk space; use deferred deletion to fully remove the thinpool.
# use_deferred_removal = "True"
# use_deferred_deletion marks thinpool device for deferred deletion.
# If the device is busy when the driver attempts to delete it, the driver
# will attempt to delete device every 30 seconds until successful.
# If the program using the driver exits, the driver will continue attempting
# to cleanup the next time the driver is used. Deferred deletion permanently
# deletes the device and all data stored in device will be lost.
# use_deferred_deletion = "True"
# xfs_nospace_max_retries specifies the maximum number of retries XFS should
# attempt to complete IO when ENOSPC (no space) error is returned by
# underlying storage device.
# xfs_nospace_max_retries = "0"
@@ -1,83 +0,0 @@
## 1.4.3
* Fix cases where `json.Number` didn't decode properly [GH-261]
## 1.4.2
* Custom name matchers to support any sort of casing, formatting, etc. for
field names. [GH-250]
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
## 1.4.1
* Fix regression where `*time.Time` value would be set to empty and not be sent
to decode hooks properly [GH-232]
## 1.4.0
* A new decode hook type `DecodeHookFuncValue` has been added that has
access to the full values. [GH-183]
* Squash is now supported with embedded fields that are struct pointers [GH-205]
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
## 1.3.3
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
## 1.3.2
* Decode into interface type with a struct value is supported [GH-187]
## 1.3.1
* Squash should only squash embedded structs. [GH-194]
## 1.3.0
* Added `",omitempty"` support. This will ignore zero values in the source
structure when encoding. [GH-145]
## 1.2.3
* Fix duplicate entries in Keys list with pointer values. [GH-185]
## 1.2.2
* Do not add unsettable (unexported) values to the unused metadata key
or "remain" value. [GH-150]
## 1.2.1
* Go modules checksum mismatch fix
## 1.2.0
* Added support to capture unused values in a field using the `",remain"` value
in the mapstructure tag. There is an example to showcase usage.
* Added `DecoderConfig` option to always squash embedded structs
* `json.Number` can decode into `uint` types
* Empty slices are preserved and not replaced with nil slices
* Fix panic that can occur in when decoding a map into a nil slice of structs
* Improved package documentation for godoc
## 1.1.2
* Fix error when decode hook decodes interface implementation into interface
type. [GH-140]
## 1.1.1
* Fix panic that can happen in `decodePtr`
## 1.1.0
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
* Support struct to struct decoding [GH-137]
* If source map value is nil, then destination map value is nil (instead of empty)
* If source slice value is nil, then destination slice value is nil (instead of empty)
* If source pointer is nil, then destination pointer is set to nil (instead of
allocated zero value of type)
## 1.0.0
* Initial tagged stable release.
@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,46 +0,0 @@
# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/mapstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
The `Decode` function has examples associated with it there.
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
@@ -1,257 +0,0 @@
package mapstructure
import (
"encoding"
"errors"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
for _, raw := range potential {
pt := reflect.ValueOf(raw).Type()
if vt.ConvertibleTo(pt) {
return v.Convert(pt).Interface()
}
}
return nil
}
// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Value, to reflect.Value) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from.Type(), to.Type(), from.Interface())
case DecodeHookFuncKind:
return f(from.Kind(), to.Kind(), from.Interface())
case DecodeHookFuncValue:
return f(from, to)
default:
return nil, errors.New("invalid decode hook signature")
}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
data := f.Interface()
newFrom := f
for _, f1 := range fs {
data, err = DecodeHookExec(f1, newFrom, t)
if err != nil {
return nil, err
}
newFrom = reflect.ValueOf(data)
}
return data, nil
}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
if f != reflect.String || t != reflect.Slice {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Duration(5)) {
return data, nil
}
// Convert it by parsing
return time.ParseDuration(data.(string))
}
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP
func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IP{}) {
return data, nil
}
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
}
return ip, nil
}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet
func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IPNet{}) {
return data, nil
}
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Time{}) {
return data, nil
}
// Convert it by parsing
return time.Parse(layout, data.(string))
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
switch f {
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
}
return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
return strconv.FormatInt(dataVal.Int(), 10), nil
case reflect.Slice:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
if elemKind == reflect.Uint8 {
return string(dataVal.Interface().([]uint8)), nil
}
case reflect.Uint:
return strconv.FormatUint(dataVal.Uint(), 10), nil
}
}
return data, nil
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
if f.Kind() != reflect.Struct {
return f.Interface(), nil
}
var i interface{} = struct{}{}
if t.Type() != reflect.TypeOf(&i).Elem() {
return f.Interface(), nil
}
m := make(map[string]interface{})
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
}
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
// strings to the UnmarshalText function, when the target type
// implements the encoding.TextUnmarshaler interface
func TextUnmarshallerHookFunc() DecodeHookFuncType {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
result := reflect.New(t).Interface()
unmarshaller, ok := result.(encoding.TextUnmarshaler)
if !ok {
return data, nil
}
if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
return nil, err
}
return result, nil
}
}
@@ -1,50 +0,0 @@
package mapstructure
import (
"errors"
"fmt"
"sort"
"strings"
)
// Error implements the error interface and can represents multiple
// errors that occur in the course of a single decode.
type Error struct {
Errors []string
}
func (e *Error) Error() string {
points := make([]string, len(e.Errors))
for i, err := range e.Errors {
points[i] = fmt.Sprintf("* %s", err)
}
sort.Strings(points)
return fmt.Sprintf(
"%d error(s) decoding:\n\n%s",
len(e.Errors), strings.Join(points, "\n"))
}
// WrappedErrors implements the errwrap.Wrapper interface to make this
// return value more useful with the errwrap and go-multierror libraries.
func (e *Error) WrappedErrors() []error {
if e == nil {
return nil
}
result := make([]error, len(e.Errors))
for i, e := range e.Errors {
result[i] = errors.New(e)
}
return result
}
func appendErrors(errors []string, err error) []string {
switch e := err.(type) {
case *Error:
return append(errors, e.Errors...)
default:
return append(errors, e.Error())
}
}
@@ -1,3 +0,0 @@
module github.com/mitchellh/mapstructure
go 1.14
File diff suppressed because it is too large
vendor/modules.txt
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.47.5-0.20220421072908-49f1a40067b2
+# github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb
 ## explicit
 github.com/containers/common/libimage
 github.com/containers/common/libimage/manifests
@@ -153,7 +153,7 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0
+# github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c
 ## explicit
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -233,7 +233,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.39.1-0.20220414183333-eea4e0f5f1f9
+# github.com/containers/storage v1.39.1-0.20220421071128-4899f8265d63
 ## explicit
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -475,8 +475,6 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/miekg/pkcs11
 # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 github.com/mistifyio/go-zfs
-# github.com/mitchellh/mapstructure v1.4.3
-github.com/mitchellh/mapstructure
 # github.com/moby/sys/mount v0.2.0
 github.com/moby/sys/mount
 # github.com/moby/sys/mountinfo v0.6.1
@@ -645,7 +643,7 @@ github.com/stefanberger/go-pkcs11uri
 ## explicit
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.4.2
+# github.com/sylabs/sif/v2 v2.6.0
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit