vendor c/common@main

In the hope of fixing a CI flake.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
This commit is contained in:
Valentin Rothberg
2022-05-10 13:56:10 +02:00
parent f65e13eb7a
commit ecf0177a01
33 changed files with 127 additions and 70 deletions

2
go.mod
View File

@ -12,7 +12,7 @@ require (
github.com/containernetworking/cni v1.1.0 github.com/containernetworking/cni v1.1.0
github.com/containernetworking/plugins v1.1.1 github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.26.1 github.com/containers/buildah v1.26.1
github.com/containers/common v0.48.0 github.com/containers/common v0.48.1-0.20220510094751-400832f41771
github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.1 github.com/containers/image/v5 v5.21.1
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f

3
go.sum
View File

@ -339,8 +339,9 @@ github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNG
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E= github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E=
github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY= github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0= github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
github.com/containers/common v0.48.1-0.20220510094751-400832f41771 h1:rHd882jzJK1fIXCJWvc1zTX5CIv2aOyzzkqj6mezLLw=
github.com/containers/common v0.48.1-0.20220510094751-400832f41771/go.mod h1:xapcAN0NbthUpjBv2UWZ5uiCGBlYbuj0o1Qg4hCaiL8=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q= github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=

View File

@ -456,13 +456,12 @@ var _ = Describe("Verify podman containers.conf usage", func() {
containersConf = []byte("[engine]\nimage_copy_tmp_dir=\"storage1\"") containersConf = []byte("[engine]\nimage_copy_tmp_dir=\"storage1\"")
err = ioutil.WriteFile(configPath, containersConf, os.ModePerm) err = ioutil.WriteFile(configPath, containersConf, os.ModePerm)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
if IsRemote() {
podmanTest.RestartRemoteService() SkipIfRemote("Restarting the system service will fail loading the broken containers.conf")
}
session = podmanTest.Podman([]string{"info", "--format", "{{.Store.ImageCopyTmpDir}}"}) session = podmanTest.Podman([]string{"info", "--format", "{{.Store.ImageCopyTmpDir}}"})
session.WaitWithDefaultTimeout() session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0)) Expect(session).Should(Exit(125))
Expect(session.Err.Contents()).To(ContainSubstring("invalid image_copy_tmp_dir")) Expect(session.Err.Contents()).To(ContainSubstring("invalid image_copy_tmp_dir"))
}) })

View File

@ -608,7 +608,7 @@ func (i *Image) RepoTags() ([]string, error) {
// NamedTaggedRepoTags returns the repotags associated with the image as a // NamedTaggedRepoTags returns the repotags associated with the image as a
// slice of reference.NamedTagged. // slice of reference.NamedTagged.
func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) { func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
var repoTags []reference.NamedTagged repoTags := make([]reference.NamedTagged, 0, len(i.Names()))
for _, name := range i.Names() { for _, name := range i.Names() {
parsed, err := reference.Parse(name) parsed, err := reference.Parse(name)
if err != nil { if err != nil {

View File

@ -32,8 +32,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
options = &LoadOptions{} options = &LoadOptions{}
} }
var loadErrors []error // we have 4 functions, so a maximum of 4 errors
loadErrors := make([]error, 0, 4)
for _, f := range []func() ([]string, string, error){ for _, f := range []func() ([]string, string, error){
// OCI // OCI
func() ([]string, string, error) { func() ([]string, string, error) {
@ -88,6 +88,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
} }
// Give a decent error message if nothing above worked. // Give a decent error message if nothing above worked.
// we want the colon here for the multiline error
//nolint:revive
loadError := fmt.Errorf("payload does not match any of the supported image formats:") loadError := fmt.Errorf("payload does not match any of the supported image formats:")
for _, err := range loadErrors { for _, err := range loadErrors {
loadError = fmt.Errorf("%v\n * %v", loadError, err) loadError = fmt.Errorf("%v\n * %v", loadError, err)

View File

@ -115,7 +115,7 @@ type NameTagPair struct {
func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) { func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
none := "<none>" none := "<none>"
var pairs []NameTagPair pairs := make([]NameTagPair, 0, len(repoTags))
for i, named := range repoTags { for i, named := range repoTags {
pair := NameTagPair{ pair := NameTagPair{
Name: named.Name(), Name: named.Name(),

View File

@ -413,11 +413,11 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo
} }
imageDigest = d imageDigest = d
} }
var results []string
images, err := r.store.ImagesByDigest(imageDigest) images, err := r.store.ImagesByDigest(imageDigest)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "listing images by manifest digest") return nil, errors.Wrapf(err, "listing images by manifest digest")
} }
results := make([]string, 0, len(images))
for _, image := range images { for _, image := range images {
results = append(results, image.ID) results = append(results, image.ID)
} }

View File

@ -6,6 +6,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames" "github.com/containers/image/v5/pkg/shortnames"
storageTransport "github.com/containers/image/v5/storage" storageTransport "github.com/containers/image/v5/storage"
@ -22,13 +23,16 @@ import (
var json = jsoniter.ConfigCompatibleWithStandardLibrary var json = jsoniter.ConfigCompatibleWithStandardLibrary
// tmpdir returns a path to a temporary directory. // tmpdir returns a path to a temporary directory.
func tmpdir() string { func tmpdir() (string, error) {
tmpdir := os.Getenv("TMPDIR") var tmpdir string
if tmpdir == "" { defaultContainerConfig, err := config.Default()
tmpdir = "/var/tmp" if err == nil {
tmpdir, err = defaultContainerConfig.ImageCopyTmpDir()
if err == nil {
return tmpdir, nil
} }
}
return tmpdir return tmpdir, err
} }
// RuntimeOptions allow for creating a customized Runtime. // RuntimeOptions allow for creating a customized Runtime.
@ -103,7 +107,11 @@ func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, e
systemContext = types.SystemContext{} systemContext = types.SystemContext{}
} }
if systemContext.BigFilesTemporaryDir == "" { if systemContext.BigFilesTemporaryDir == "" {
systemContext.BigFilesTemporaryDir = tmpdir() tmpdir, err := tmpdir()
if err != nil {
return nil, err
}
systemContext.BigFilesTemporaryDir = tmpdir
} }
setRegistriesConfPath(&systemContext) setRegistriesConfPath(&systemContext)
@ -224,7 +232,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
} }
logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport()) logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
return r.storageToImage(img, storageRef), "", nil return r.storageToImage(img, storageRef), "", nil
} else { }
// Docker compat: strip off the tag iff name is tagged and digested // Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped // (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth. // off and entirely ignored. The digest is the sole source of truth.
@ -233,7 +241,6 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
return nil, "", err return nil, "", err
} }
name = normalizedName name = normalizedName
}
byDigest := false byDigest := false
originalName := name originalName := name

View File

@ -96,7 +96,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
newNetwork.ID = getNetworkIDFromName(newNetwork.Name) newNetwork.ID = getNetworkIDFromName(newNetwork.Name)
// when we do not have ipam we must disable dns // when we do not have ipam we must disable dns
internalutil.IpamNoneDisableDns(newNetwork) internalutil.IpamNoneDisableDNS(newNetwork)
// FIXME: Should this be a hard error? // FIXME: Should this be a hard error?
if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) {

View File

@ -106,7 +106,7 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
} }
// CNIResultToStatus convert the cni result to status block // CNIResultToStatus convert the cni result to status block
// nolint:golint // nolint:golint,revive
func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) { func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
result := types.StatusBlock{} result := types.StatusBlock{}
cniResult, err := types040.GetResult(res) cniResult, err := types040.GetResult(res)

View File

@ -41,7 +41,7 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
return nil return nil
} }
func IpamNoneDisableDns(network *types.Network) { func IpamNoneDisableDNS(network *types.Network) {
if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver { if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver {
logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name) logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name)
network.DNSEnabled = false network.DNSEnabled = false

View File

@ -121,7 +121,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
} }
// when we do not have ipam we must disable dns // when we do not have ipam we must disable dns
internalutil.IpamNoneDisableDns(newNetwork) internalutil.IpamNoneDisableDNS(newNetwork)
// add gateway when not internal or dns enabled // add gateway when not internal or dns enabled
addGateway := !newNetwork.Internal || newNetwork.DNSEnabled addGateway := !newNetwork.Internal || newNetwork.DNSEnabled

View File

@ -46,6 +46,9 @@ const (
// 1. read ${graphroot}/defaultNetworkBackend // 1. read ${graphroot}/defaultNetworkBackend
// 2. find netavark binary (if not installed use CNI) // 2. find netavark binary (if not installed use CNI)
// 3. check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI // 3. check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI
//
// revive does not like the name because the package is already called network
//nolint:revive
func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) {
backend := types.NetworkBackend(conf.Network.NetworkBackend) backend := types.NetworkBackend(conf.Network.NetworkBackend)
if backend == "" { if backend == "" {

View File

@ -251,19 +251,17 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
if unshare.IsRootless() { if unshare.IsRootless() {
if name != "" { if name != "" {
return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name) return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
} else { }
logrus.Debug("Skipping loading default AppArmor profile (rootless mode)") logrus.Debug("Skipping loading default AppArmor profile (rootless mode)")
return "", nil return "", nil
} }
}
// Check if AppArmor is disabled and error out if a profile is to be set. // Check if AppArmor is disabled and error out if a profile is to be set.
if !runcaa.IsEnabled() { if !runcaa.IsEnabled() {
if name == "" { if name == "" {
return "", nil return "", nil
} else {
return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
} }
return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
} }
if name == "" { if name == "" {

View File

@ -26,8 +26,8 @@ func GetDefaultAuthFile() string {
if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
return authfile return authfile
} }
if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" { if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
return filepath.Join(auth_env, "config.json") return filepath.Join(authEnv, "config.json")
} }
return "" return ""
} }
@ -313,7 +313,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key) fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil return nil
} }
return errors.Errorf("Not logged into %s\n", key) return errors.Errorf("not logged into %s", key)
default: default:
return errors.Wrapf(err, "logging out of %q", key) return errors.Wrapf(err, "logging out of %q", key)
} }

View File

@ -104,8 +104,8 @@ func AllCapabilities() []string {
// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet // NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
// present). // present).
func NormalizeCapabilities(caps []string) ([]string, error) { func NormalizeCapabilities(caps []string) ([]string, error) {
normalized := make([]string, len(caps)) normalized := make([]string, 0, len(caps))
for i, c := range caps { for _, c := range caps {
c = strings.ToUpper(c) c = strings.ToUpper(c)
if c == All { if c == All {
normalized = append(normalized, c) normalized = append(normalized, c)
@ -117,7 +117,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
if !stringInSlice(c, capabilityList) { if !stringInSlice(c, capabilityList) {
return nil, errors.Wrapf(ErrUnknownCapability, "%q", c) return nil, errors.Wrapf(ErrUnknownCapability, "%q", c)
} }
normalized[i] = c normalized = append(normalized, c)
} }
sort.Strings(normalized) sort.Strings(normalized)
return normalized, nil return normalized, nil
@ -140,8 +140,6 @@ func ValidateCapabilities(caps []string) error {
// "ALL" in capAdd adds returns known capabilities // "ALL" in capAdd adds returns known capabilities
// "All" in capDrop returns only the capabilities specified in capAdd // "All" in capDrop returns only the capabilities specified in capAdd
func MergeCapabilities(base, adds, drops []string) ([]string, error) { func MergeCapabilities(base, adds, drops []string) ([]string, error) {
var caps []string
// Normalize the base capabilities // Normalize the base capabilities
base, err := NormalizeCapabilities(base) base, err := NormalizeCapabilities(base)
if err != nil { if err != nil {
@ -189,6 +187,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
} }
} }
caps := make([]string, 0, len(base)+len(capAdd))
// Drop any capabilities in capDrop that are in base // Drop any capabilities in capDrop that are in base
for _, cap := range base { for _, cap := range base {
if stringInSlice(cap, capDrop) { if stringInSlice(cap, capDrop) {

View File

@ -9,6 +9,7 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
@ -96,6 +97,22 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) {
// It differs from os.RemoveAll as it doesn't attempt to unlink files. // It differs from os.RemoveAll as it doesn't attempt to unlink files.
// On cgroupfs we are allowed only to rmdir empty directories. // On cgroupfs we are allowed only to rmdir empty directories.
func rmDirRecursively(path string) error { func rmDirRecursively(path string) error {
killProcesses := func(signal syscall.Signal) {
if signal == unix.SIGKILL {
if err := ioutil.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0600); err == nil {
return
}
}
// kill all the processes that are still part of the cgroup
if procs, err := ioutil.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil {
for _, pidS := range strings.Split(string(procs), "\n") {
if pid, err := strconv.Atoi(pidS); err == nil {
_ = unix.Kill(pid, signal)
}
}
}
}
if err := os.Remove(path); err == nil || os.IsNotExist(err) { if err := os.Remove(path); err == nil || os.IsNotExist(err) {
return nil return nil
} }
@ -118,8 +135,16 @@ func rmDirRecursively(path string) error {
return nil return nil
} }
if errors.Is(err, unix.EBUSY) { if errors.Is(err, unix.EBUSY) {
// attempt up to 5 seconds if the cgroup is busy // send a SIGTERM after 3 second
if attempts < 500 { if attempts == 300 {
killProcesses(unix.SIGTERM)
}
// send SIGKILL after 8 seconds
if attempts == 800 {
killProcesses(unix.SIGKILL)
}
// give up after 10 seconds
if attempts < 1000 {
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
attempts++ attempts++
continue continue

View File

@ -51,7 +51,7 @@ func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete stri
offset = 4 offset = 4
} }
var completions []string completions := make([]string, 0, len(caps))
for _, cap := range caps { for _, cap := range caps {
completions = append(completions, convertCase(cap)[offset:]) completions = append(completions, convertCase(cap)[offset:])
} }

View File

@ -553,6 +553,9 @@ type SecretConfig struct {
} }
// ConfigMapConfig represents the "configmap" TOML config table // ConfigMapConfig represents the "configmap" TOML config table
//
// revive does not like the name because the package is already called config
//nolint:revive
type ConfigMapConfig struct { type ConfigMapConfig struct {
// Driver specifies the configmap driver to use. // Driver specifies the configmap driver to use.
// Current valid value: // Current valid value:
@ -1215,14 +1218,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) {
// FindHelperBinary will search the given binary name in the configured directories. // FindHelperBinary will search the given binary name in the configured directories.
// If searchPATH is set to true it will also search in $PATH. // If searchPATH is set to true it will also search in $PATH.
func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) {
dir_list := c.Engine.HelperBinariesDir dirList := c.Engine.HelperBinariesDir
// If set, search this directory first. This is used in testing. // If set, search this directory first. This is used in testing.
if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found {
dir_list = append([]string{dir}, dir_list...) dirList = append([]string{dir}, dirList...)
} }
for _, path := range dir_list { for _, path := range dirList {
fullpath := filepath.Join(path, name) fullpath := filepath.Join(path, name)
if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
return fullpath, nil return fullpath, nil

View File

@ -36,11 +36,13 @@ func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
// //
// Please refer to https://github.com/containers/podman/issues/6899 for some // Please refer to https://github.com/containers/podman/issues/6899 for some
// background. // background.
//
// revive does not like the name because the package is already called filters
//nolint:revive
func FiltersFromRequest(r *http.Request) ([]string, error) { func FiltersFromRequest(r *http.Request) ([]string, error) {
var ( var (
compatFilters map[string]map[string]bool compatFilters map[string]map[string]bool
filters map[string][]string filters map[string][]string
libpodFilters []string
raw []byte raw []byte
) )
@ -54,6 +56,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) {
// Backwards compat with older versions of Docker. // Backwards compat with older versions of Docker.
if err := json.Unmarshal(raw, &compatFilters); err == nil { if err := json.Unmarshal(raw, &compatFilters); err == nil {
libpodFilters := make([]string, 0, len(compatFilters))
for filterKey, filterMap := range compatFilters { for filterKey, filterMap := range compatFilters {
for filterValue, toAdd := range filterMap { for filterValue, toAdd := range filterMap {
if toAdd { if toAdd {
@ -68,6 +71,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) {
return nil, err return nil, err
} }
libpodFilters := make([]string, 0, len(filters))
for filterKey, filterSlice := range filters { for filterKey, filterSlice := range filters {
f := filterKey f := filterKey
for _, filterValue := range filterSlice { for _, filterValue := range filterSlice {

View File

@ -9,6 +9,8 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// TODO: change name to MachineMarker since package is already called machine
//nolint:revive
type MachineMarker struct { type MachineMarker struct {
Enabled bool Enabled bool
Type string Type string
@ -54,6 +56,8 @@ func IsPodmanMachine() bool {
return GetMachineMarker().Enabled return GetMachineMarker().Enabled
} }
// TODO: change name to HostType since package is already called machine
//nolint:revive
func MachineHostType() string { func MachineHostType() string {
return GetMachineMarker().Type return GetMachineMarker().Type
} }

View File

@ -13,7 +13,6 @@ import (
) )
func DeviceFromPath(device string) ([]devices.Device, error) { func DeviceFromPath(device string) ([]devices.Device, error) {
var devs []devices.Device
src, dst, permissions, err := Device(device) src, dst, permissions, err := Device(device)
if err != nil { if err != nil {
return nil, err return nil, err
@ -27,7 +26,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
} }
if !srcInfo.IsDir() { if !srcInfo.IsDir() {
devs := make([]devices.Device, 0, 1)
dev, err := devices.DeviceFromPath(src, permissions) dev, err := devices.DeviceFromPath(src, permissions)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "%s is not a valid device", src) return nil, errors.Wrapf(err, "%s is not a valid device", src)
@ -42,6 +41,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
} }
devs := make([]devices.Device, 0, len(srcDevices))
for _, d := range srcDevices { for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path)) d.Path = filepath.Join(dst, filepath.Base(d.Path))
d.Permissions = devices.Permissions(permissions) d.Permissions = devices.Permissions(permissions)

View File

@ -17,12 +17,17 @@ import (
) )
// RetryOptions defines the option to retry // RetryOptions defines the option to retry
// revive does not like the name because the package is already called retry
//nolint:revive
type RetryOptions struct { type RetryOptions struct {
MaxRetry int // The number of times to possibly retry MaxRetry int // The number of times to possibly retry
Delay time.Duration // The delay to use between retries, if set Delay time.Duration // The delay to use between retries, if set
} }
// RetryIfNecessary retries the operation in exponential backoff with the retryOptions // RetryIfNecessary retries the operation in exponential backoff with the retryOptions
//
// revive does not like the name because the package is already called retry
//nolint:revive
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error { func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
err := operation() err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ { for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {

View File

@ -76,6 +76,7 @@ var (
specs.ActAllow: ActAllow, specs.ActAllow: ActAllow,
specs.ActTrace: ActTrace, specs.ActTrace: ActTrace,
specs.ActLog: ActLog, specs.ActLog: ActLog,
specs.ActNotify: ActNotify,
} }
specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{ specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{
specs.OpNotEqual: OpNotEqual, specs.OpNotEqual: OpNotEqual,

View File

@ -130,7 +130,7 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name) return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name)
} }
argCounts[cond.Index] += 1 argCounts[cond.Index]++
conditions = append(conditions, newCond) conditions = append(conditions, newCond)
} }

View File

@ -75,6 +75,7 @@ const (
ActTrace Action = "SCMP_ACT_TRACE" ActTrace Action = "SCMP_ACT_TRACE"
ActAllow Action = "SCMP_ACT_ALLOW" ActAllow Action = "SCMP_ACT_ALLOW"
ActLog Action = "SCMP_ACT_LOG" ActLog Action = "SCMP_ACT_LOG"
ActNotify Action = "SCMP_ACT_NOTIFY"
) )
// Operator used to match syscall arguments in Seccomp // Operator used to match syscall arguments in Seccomp

View File

@ -55,7 +55,7 @@ func (d *Driver) List() ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
var allID []string allID := make([]string, 0, len(secretData))
for k := range secretData { for k := range secretData {
allID = append(allID, k) allID = append(allID, k)
} }
@ -134,9 +134,8 @@ func (d *Driver) getAllData() (map[string][]byte, error) {
if os.IsNotExist(err) { if os.IsNotExist(err) {
// the file will be created later on a store() // the file will be created later on a store()
return make(map[string][]byte), nil return make(map[string][]byte), nil
} else {
return nil, err
} }
return nil, err
} }
file, err := os.Open(d.secretsDataFilePath) file, err := os.Open(d.secretsDataFilePath)

View File

@ -53,6 +53,9 @@ var secretsFile = "secrets.json"
var secretNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`) var secretNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)
// SecretsManager holds information on handling secrets // SecretsManager holds information on handling secrets
//
// revive does not like the name because the package is already called secrets
//nolint:revive
type SecretsManager struct { type SecretsManager struct {
// secretsPath is the path to the db file where secrets are stored // secretsPath is the path to the db file where secrets are stored
secretsDBPath string secretsDBPath string
@ -82,6 +85,9 @@ type Secret struct {
// The driver stores the actual bytes of secret data, as opposed to // The driver stores the actual bytes of secret data, as opposed to
// the secret metadata. // the secret metadata.
// Currently only the unencrypted filedriver is implemented. // Currently only the unencrypted filedriver is implemented.
//
// revive does not like the name because the package is already called secrets
//nolint:revive
type SecretsDriver interface { type SecretsDriver interface {
// List lists all secret ids in the secrets data store // List lists all secret ids in the secrets data store
List() ([]string, error) List() ([]string, error)
@ -234,7 +240,7 @@ func (s *SecretsManager) List() ([]Secret, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
var ls []Secret ls := make([]Secret, 0, len(secrets))
for _, v := range secrets { for _, v := range secrets {
ls = append(ls, v) ls = append(ls, v)
} }
@ -276,9 +282,8 @@ func getDriver(name string, opts map[string]string) (SecretsDriver, error) {
case "file": case "file":
if path, ok := opts["path"]; ok { if path, ok := opts["path"]; ok {
return filedriver.NewDriver(path) return filedriver.NewDriver(path)
} else {
return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
} }
return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
case "pass": case "pass":
return passdriver.NewDriver(opts) return passdriver.NewDriver(opts)
case "shell": case "shell":

View File

@ -31,9 +31,8 @@ func (s *SecretsManager) loadDB() error {
// the db cache will show no entries anyway. // the db cache will show no entries anyway.
// The file will be created later on a store() // The file will be created later on a store()
return nil return nil
} else {
return err
} }
return err
} }
// We check if the file has been modified after the last time it was loaded into the cache. // We check if the file has been modified after the last time it was loaded into the cache.

View File

@ -212,8 +212,8 @@ func rchown(chowndir string, uid, gid int) error {
// addSubscriptionsFromMountsFile copies the contents of host directory to container directory // addSubscriptionsFromMountsFile copies the contents of host directory to container directory
// and returns a list of mounts // and returns a list of mounts
func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) { func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) {
var mounts []rspec.Mount
defaultMountsPaths := getMounts(filePath) defaultMountsPaths := getMounts(filePath)
mounts := make([]rspec.Mount, 0, len(defaultMountsPaths))
for _, path := range defaultMountsPaths { for _, path := range defaultMountsPaths {
hostDirOrFile, ctrDirOrFile, err := getMountsMap(path) hostDirOrFile, ctrDirOrFile, err := getMountsMap(path)
if err != nil { if err != nil {

View File

@ -12,6 +12,8 @@ import (
// NUMANodeCount queries the system for the count of Memory Nodes available // NUMANodeCount queries the system for the count of Memory Nodes available
// for use to this process. // for use to this process.
func NUMANodeCount() int { func NUMANodeCount() int {
// this is the correct flag name (not defined in the unix package)
//nolint:revive
MPOL_F_MEMS_ALLOWED := (1 << 2) MPOL_F_MEMS_ALLOWED := (1 << 2)
var mask [1024 / 64]uintptr var mask [1024 / 64]uintptr
_, _, err := unix.RawSyscall6(unix.SYS_GET_MEMPOLICY, 0, uintptr(unsafe.Pointer(&mask[0])), uintptr(len(mask)*8), 0, uintptr(MPOL_F_MEMS_ALLOWED), 0) _, _, err := unix.RawSyscall6(unix.SYS_GET_MEMPOLICY, 0, uintptr(unsafe.Pointer(&mask[0])), uintptr(len(mask)*8), 0, uintptr(MPOL_F_MEMS_ALLOWED), 0)

View File

@ -1,4 +1,4 @@
package version package version
// Version is the version of the build. // Version is the version of the build.
const Version = "0.48.0" const Version = "0.49.0-dev"

2
vendor/modules.txt vendored
View File

@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util github.com/containers/buildah/pkg/util
github.com/containers/buildah/util github.com/containers/buildah/util
# github.com/containers/common v0.48.0 # github.com/containers/common v0.48.1-0.20220510094751-400832f41771
## explicit ## explicit
github.com/containers/common/libimage github.com/containers/common/libimage
github.com/containers/common/libimage/manifests github.com/containers/common/libimage/manifests