Mirror of https://github.com/containers/podman.git (synced 2025-05-17 15:18:43 +08:00)
libpod/runtime: switch to golang native error wrapping
We now use the golang error wrapping format specifier `%w` instead of the deprecated github.com/pkg/errors package.

[NO NEW TESTS NEEDED]

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
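The change is mechanical across the runtime code: `errors.Wrapf(err, "msg %s", arg)` becomes `fmt.Errorf("msg %s: %w", arg, err)`, and checks such as `errors.Cause(err) == someErr` or `os.IsNotExist(errors.Cause(err))` become `errors.Is(err, someErr)` / `errors.Is(err, os.ErrNotExist)`. A minimal, self-contained sketch of the pattern follows; the path and the sentinel error are illustrative, not taken from this diff:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// errNotReady stands in for a sentinel error such as the define.Err* values
// used throughout libpod; it is illustrative only.
var errNotReady = errors.New("runtime is not ready")

// openConfig wraps a lower-level error with %w, the stdlib replacement for
// errors.Wrapf from github.com/pkg/errors.
func openConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Before: return errors.Wrapf(err, "error opening config %s", path)
		return fmt.Errorf("error opening config %s: %w", path, err)
	}
	return nil
}

func main() {
	err := openConfig("/nonexistent/config")

	// Before: os.IsNotExist(errors.Cause(err))
	// errors.Is unwraps the chain built by %w, so the sentinel is still found.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("config is missing:", err)
	}

	// Sentinel comparisons written as errors.Cause(err) == errNotReady
	// become errors.Is(err, errNotReady) in the same way.
	fmt.Println(errors.Is(fmt.Errorf("starting: %w", errNotReady), errNotReady))
}
```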
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"os"
"os/exec"
@@ -40,7 +41,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/docker/docker/pkg/namesgenerator"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -146,7 +146,7 @@ func SetXdgDirs() error {
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
+ return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
}

if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
@@ -163,7 +163,7 @@ func SetXdgDirs() error {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
- return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
+ return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
}
}
return nil
@@ -214,7 +214,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
- return nil, errors.Wrapf(err, "error configuring runtime")
+ return nil, fmt.Errorf("error configuring runtime: %w", err)
}
}

@@ -225,12 +225,12 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
}
os.Exit(1)
return nil
- }); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
+ }); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
logrus.Errorf("Registering shutdown handler for libpod: %v", err)
}

if err := shutdown.Start(); err != nil {
- return nil, errors.Wrapf(err, "error starting shutdown signal handler")
+ return nil, fmt.Errorf("error starting shutdown signal handler: %w", err)
}

if err := makeRuntime(runtime); err != nil {
@@ -256,10 +256,10 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new file lock manager")
+ return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
}
} else {
return nil, err
@@ -275,19 +275,19 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
- case os.IsNotExist(errors.Cause(err)):
+ case errors.Is(err, os.ErrNotExist):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
- return nil, errors.Wrapf(err, "failed to get new shm lock manager")
+ return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
}
- case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
+ case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")

// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
- return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+ return nil, fmt.Errorf("error removing libpod locks file %s: %w", lockPath, err)
}

manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
@@ -299,7 +299,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
+ return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
}
return manager, nil
}
@@ -315,17 +315,17 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtime.conmonPath = cPath

if runtime.noStore && runtime.doReset {
- return errors.Wrapf(define.ErrInvalidArg, "cannot perform system reset if runtime is not creating a store")
+ return fmt.Errorf("cannot perform system reset if runtime is not creating a store: %w", define.ErrInvalidArg)
}
if runtime.doReset && runtime.doRenumber {
- return errors.Wrapf(define.ErrInvalidArg, "cannot perform system reset while renumbering locks")
+ return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
}

// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating runtime static files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime static files directory: %w", err)
}
}

@@ -337,9 +337,9 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "in-memory state is currently disabled")
+ return fmt.Errorf("in-memory state is currently disabled: %w", define.ErrInvalidArg)
case config.SQLiteStateStore:
- return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
+ return fmt.Errorf("SQLite state is currently disabled: %w", define.ErrInvalidArg)
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")

@@ -349,7 +349,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.state = state
default:
- return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
+ return fmt.Errorf("unrecognized state type passed (%v): %w", runtime.config.Engine.StateType, define.ErrInvalidArg)
}

// Grab config from the database so we can reset some defaults
@@ -369,7 +369,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}

- return errors.Wrapf(err, "error retrieving runtime configuration from database")
+ return fmt.Errorf("error retrieving runtime configuration from database: %w", err)
}

runtime.mergeDBConfig(dbConfig)
@@ -412,7 +412,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}

if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
- return errors.Wrapf(err, "error setting libpod namespace in state")
+ return fmt.Errorf("error setting libpod namespace in state: %w", err)
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)

@@ -468,16 +468,16 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating tmpdir")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating tmpdir: %w", err)
}
}

// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrap(err, "error creating events dirs")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating events dirs: %w", err)
}
}

@@ -514,7 +514,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
- return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
+ return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
}
runtime.defaultOCIRuntime = ociRuntime
}
@@ -523,19 +523,19 @@ func makeRuntime(runtime *Runtime) (retErr error) {

// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
- return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
+ return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
}

// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
- return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
+ return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
}

// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime temporary files directory")
+ if !errors.Is(err, os.ErrExist) {
+ return fmt.Errorf("error creating runtime temporary files directory: %w", err)
}
}

@@ -556,7 +556,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
- return errors.Wrapf(err, "error acquiring runtime init lock")
+ return fmt.Errorf("error acquiring runtime init lock: %w", err)
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
@@ -586,7 +586,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
@@ -607,10 +607,10 @@ func makeRuntime(runtime *Runtime) (retErr error) {
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
- if os.IsNotExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
doRefresh = true
} else {
- return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
+ return fmt.Errorf("error reading runtime status file %s: %w", runtimeAliveFile, err)
}
}

@@ -704,14 +704,14 @@ func findConmon(conmonPaths []string) (string, error) {
}

if foundOutdatedConmon {
- return "", errors.Wrapf(define.ErrConmonOutdated,
- "please update to v%d.%d.%d or later",
- conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion)
+ return "", fmt.Errorf(
+ "please update to v%d.%d.%d or later: %w",
+ conmonMinMajorVersion, conmonMinMinorVersion, conmonMinPatchVersion, define.ErrConmonOutdated)
}

- return "", errors.Wrapf(define.ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- conmonPaths)
+ return "", fmt.Errorf(
+ "could not find a working conmon binary (configured options: %v): %w",
+ conmonPaths, define.ErrInvalidArg)
}

// probeConmon calls conmon --version and verifies it is a new enough version for
@@ -728,11 +728,11 @@ func probeConmon(conmonBinary string) error {

matches := r.FindStringSubmatch(out.String())
if len(matches) != 4 {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
major, err := strconv.Atoi(matches[1])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if major < conmonMinMajorVersion {
return define.ErrConmonOutdated
@@ -743,7 +743,7 @@ func probeConmon(conmonBinary string) error {

minor, err := strconv.Atoi(matches[2])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if minor < conmonMinMinorVersion {
return define.ErrConmonOutdated
@@ -754,7 +754,7 @@ func probeConmon(conmonBinary string) error {

patch, err := strconv.Atoi(matches[3])
if err != nil {
- return errors.Wrap(err, define.ErrConmonVersionFormat)
+ return fmt.Errorf("%v: %w", define.ErrConmonVersionFormat, err)
}
if patch < conmonMinPatchVersion {
return define.ErrConmonOutdated
@@ -798,7 +798,7 @@ func (r *Runtime) GetConfig() (*config.Config, error) {

// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(rtConfig, config); err != nil {
- return nil, errors.Wrapf(err, "error copying config")
+ return nil, fmt.Errorf("error copying config: %w", err)
}

return config, nil
@@ -909,7 +909,7 @@ func (r *Runtime) Shutdown(force bool) error {

// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
- lastError = errors.Wrapf(err, "error shutting down container storage")
+ lastError = fmt.Errorf("error shutting down container storage: %w", err)
}
}
if err := r.state.Close(); err != nil {
@@ -941,15 +941,15 @@ func (r *Runtime) refresh(alivePath string) error {
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
- return errors.Wrapf(err, "error retrieving all containers from state")
+ return fmt.Errorf("error retrieving all containers from state: %w", err)
}
pods, err := r.state.AllPods()
if err != nil {
- return errors.Wrapf(err, "error retrieving all pods from state")
+ return fmt.Errorf("error retrieving all pods from state: %w", err)
}
vols, err := r.state.AllVolumes()
if err != nil {
- return errors.Wrapf(err, "error retrieving all volumes from state")
+ return fmt.Errorf("error retrieving all volumes from state: %w", err)
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
@@ -977,7 +977,7 @@ func (r *Runtime) refresh(alivePath string) error {
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
- return errors.Wrap(err, "error creating runtime status file")
+ return fmt.Errorf("error creating runtime status file: %w", err)
}
defer file.Close()

@@ -998,13 +998,13 @@ func (r *Runtime) generateName() (string, error) {
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchCtr {
+ } else if !errors.Is(err, define.ErrNoSuchCtr) {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
- } else if errors.Cause(err) != define.ErrNoSuchPod {
+ } else if !errors.Is(err, define.ErrNoSuchPod) {
return "", err
}
return name, nil
@@ -1203,7 +1203,7 @@ func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {

pluginPath, ok := r.config.Engine.VolumePlugins[name]
if !ok {
- return nil, errors.Wrapf(define.ErrMissingPlugin, "no volume plugin with name %s available", name)
+ return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
}

return plugin.GetVolumePlugin(name, pluginPath)

@@ -1,11 +1,12 @@
package libpod

import (
+ "errors"
+ "fmt"
"time"

"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -38,7 +39,7 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
// Look up if container is in state
hasCtr, err := r.state.HasContainer(ctr.ID)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up container %s in state", ctr.ID)
+ return nil, fmt.Errorf("error looking up container %s in state: %w", ctr.ID, err)
}

storageCtr.PresentInLibpod = hasCtr
@@ -60,20 +61,20 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error)
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
targetID, err := r.store.Lookup(idOrName)
if err != nil {
- if errors.Cause(err) == storage.ErrLayerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName)
+ if errors.Is(err, storage.ErrLayerUnknown) {
+ return fmt.Errorf("no container with ID or name %q found: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error looking up container %q", idOrName)
+ return fmt.Errorf("error looking up container %q: %w", idOrName, err)
}

// Lookup returns an ID but it's not guaranteed to be a container ID.
// So we can still error here.
ctr, err := r.store.Container(targetID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
- return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName)
+ if errors.Is(err, storage.ErrContainerUnknown) {
+ return fmt.Errorf("%q does not refer to a container: %w", idOrName, define.ErrNoSuchCtr)
}
- return errors.Wrapf(err, "error retrieving container %q", idOrName)
+ return fmt.Errorf("error retrieving container %q: %w", idOrName, err)
}

// Error out if the container exists in libpod
@@ -82,13 +83,13 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
return err
}
if exists {
- return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID)
+ return fmt.Errorf("refusing to remove %q as it exists in libpod as container %s: %w", idOrName, ctr.ID, define.ErrCtrExists)
}

if !force {
timesMounted, err := r.store.Mounted(ctr.ID)
if err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container was removed from under us.
// It's gone, so don't bother erroring.
logrus.Infof("Storage for container %s already removed", ctr.ID)
@@ -97,7 +98,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
- return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
+ return fmt.Errorf("container %q is mounted and cannot be removed without using force: %w", idOrName, define.ErrCtrStateInvalid)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
if errors.Is(err, storage.ErrContainerUnknown) {
@@ -109,12 +110,12 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
}

if err := r.store.DeleteContainer(ctr.ID); err != nil {
- if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error removing storage for container %q", idOrName)
+ return fmt.Errorf("error removing storage for container %q: %w", idOrName, err)
}

return nil

@@ -2,6 +2,7 @@ package libpod

import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -26,7 +27,6 @@ import (
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -86,7 +86,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config

ctr, err := r.initContainerVariables(rSpec, config)
if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}
// For an imported checkpoint no one has ever set the StartedTime. Set it now.
ctr.state.StartedTime = time.Now()
@@ -126,7 +126,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", ctr.ID())
+ return nil, fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
}
ctr.config = newConf

@@ -143,7 +143,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
// Set config back to the old name so reflect what is actually
// present in the DB.
ctr.config.Name = oldName
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
+ return nil, fmt.Errorf("error renaming container %s: %w", ctr.ID(), err)
}

// Step 3: rename the container in c/storage.
@@ -162,7 +162,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s

func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
+ return nil, fmt.Errorf("must provide a valid runtime spec to create container: %w", define.ErrInvalidArg)
}
ctr := new(Container)
ctr.config = new(ContainerConfig)
@@ -172,7 +172,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.ID = stringid.GenerateNonCryptoID()
size, err := units.FromHumanSize(r.config.Containers.ShmSize)
if err != nil {
- return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize)
+ return nil, fmt.Errorf("converting containers.conf ShmSize %s to an int: %w", r.config.Containers.ShmSize, err)
}
ctr.config.ShmSize = size
ctr.config.NoShm = false
@@ -184,7 +184,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
// This is a restore from an imported checkpoint
ctr.restoreFromCheckpoint = true
if err := JSONDeepCopy(config, ctr.config); err != nil {
- return nil, errors.Wrapf(err, "error copying container config for restore")
+ return nil, fmt.Errorf("error copying container config for restore: %w", err)
}
// If the ID is empty a new name for the restored container was requested
if ctr.config.ID == "" {
@@ -224,12 +224,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr, err = r.initContainerVariables(rSpec, nil)

if err != nil {
- return nil, errors.Wrapf(err, "error initializing container variables")
+ return nil, fmt.Errorf("error initializing container variables: %w", err)
}

for _, option := range options {
if err := option(ctr); err != nil {
- return nil, errors.Wrapf(err, "error running container create option")
+ return nil, fmt.Errorf("error running container create option: %w", err)
}
}

@@ -248,7 +248,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if opts.InterfaceName != "" {
// check that no name is assigned to more than network
if cutil.StringInSlice(opts.InterfaceName, usedIfNames) {
- return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
+ return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
}
usedIfNames = append(usedIfNames, opts.InterfaceName)
}
@@ -296,7 +296,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new container")
+ return nil, fmt.Errorf("error allocating lock for new container: %w", err)
}
ctr.lock = lock
ctr.config.LockID = ctr.lock.ID()
@@ -319,7 +319,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
} else {
ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
if !ok {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+ return nil, fmt.Errorf("requested OCI runtime %s is not available: %w", ctr.config.OCIRuntime, define.ErrInvalidArg)
}
ctr.ociRuntime = ociRuntime
}
@@ -327,7 +327,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Check NoCgroups support
if ctr.config.NoCgroups {
if !ctr.ociRuntime.SupportsNoCgroups() {
- return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not compatible with NoCgroups", ctr.ociRuntime.Name())
+ return nil, fmt.Errorf("requested OCI runtime %s is not compatible with NoCgroups: %w", ctr.ociRuntime.Name(), define.ErrInvalidArg)
}
}

@@ -336,7 +336,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
// Get the pod from state
pod, err = r.state.Pod(ctr.config.Pod)
if err != nil {
- return nil, errors.Wrapf(err, "cannot add container %s to pod %s", ctr.ID(), ctr.config.Pod)
+ return nil, fmt.Errorf("cannot add container %s to pod %s: %w", ctr.ID(), ctr.config.Pod, err)
}
}

@@ -350,14 +350,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
expectPodCgroup, err := ctr.expectPodCgroup()
if err != nil {
return nil, err
}
if expectPodCgroup && podCgroup == "" {
- return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID())
+ return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
}
canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup)
if canUseCgroup {
@@ -367,7 +367,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
}
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
case config.SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
@@ -375,7 +375,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
podCgroup, err := pod.CgroupPath()
if err != nil {
- return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
+ return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
}
ctr.config.CgroupParent = podCgroup
case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
@@ -384,10 +384,10 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}

@@ -470,8 +470,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctrNamedVolumes = append(ctrNamedVolumes, dbVol)
// The volume exists, we're good
continue
- } else if errors.Cause(err) != define.ErrNoSuchVolume {
- return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
+ } else if !errors.Is(err, define.ErrNoSuchVolume) {
+ return nil, fmt.Errorf("error retrieving named volume %s for new container: %w", vol.Name, err)
}
}

@@ -504,7 +504,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
}
newVol, err := r.newVolume(false, volOptions...)
if err != nil {
- return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
+ return nil, fmt.Errorf("error creating named volume %q: %w", vol.Name, err)
}

ctrNamedVolumes = append(ctrNamedVolumes, newVol)
@@ -527,7 +527,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm")
if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil {
if !os.IsExist(err) {
- return nil, errors.Wrap(err, "unable to create shm dir")
+ return nil, fmt.Errorf("unable to create shm dir: %w", err)
}
}
ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
@@ -596,7 +596,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// exist once we're done.
newConf, err := r.state.GetContainerConfig(c.ID())
if err != nil {
- return errors.Wrapf(err, "error retrieving container %s configuration from DB to remove", c.ID())
+ return fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", c.ID(), err)
}
c.config = newConf

@@ -611,12 +611,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if c.config.Pod != "" && !removePod {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}

// Lock the pod while we're removing container
if pod.config.LockID == c.config.LockID {
- return errors.Wrapf(define.ErrWillDeadlock, "container %s and pod %s share lock ID %d", c.ID(), pod.ID(), c.config.LockID)
+ return fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
}
pod.lock.Lock()
defer pod.lock.Unlock()
@@ -626,7 +626,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo

infraID := pod.state.InfraContainerID
if c.ID() == infraID {
- return errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}

@@ -693,7 +693,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
- return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+ return fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
}
}

@@ -705,8 +705,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
// Ignore ErrConmonDead - we couldn't retrieve the container's
// exit code properly, but it's still stopped.
- if err := c.stop(time); err != nil && errors.Cause(err) != define.ErrConmonDead {
- return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
+ if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
+ return fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
}

// We unlocked as part of stop() above - there's a chance someone
@@ -717,7 +717,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// When the container has already been removed, the OCI runtime directory remain.
if err := c.cleanupRuntime(ctx); err != nil {
- return errors.Wrapf(err, "error cleaning up container %s from OCI runtime", c.ID())
+ return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
}
return nil
}
@@ -729,7 +729,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Do this before we set ContainerStateRemoving, to ensure that we can
// actually remove from the OCI runtime.
if err := c.cleanup(ctx); err != nil {
- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID())
+ cleanupErr = fmt.Errorf("error cleaning up container %s: %w", c.ID(), err)
}

// Set ContainerStateRemoving
@@ -739,7 +739,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr != nil {
logrus.Errorf(err.Error())
}
- return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())
+ return fmt.Errorf("unable to set container %s removing state in database: %w", c.ID(), err)
}

// Remove all active exec sessions
@@ -789,7 +789,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
- cleanupErr = errors.Wrapf(err, "error freeing lock for container %s", c.ID())
+ cleanupErr = fmt.Errorf("error freeing lock for container %s: %w", c.ID(), err)
} else {
logrus.Errorf("Free container lock: %v", err)
}
@@ -809,8 +809,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if !volume.Anonymous() {
continue
}
- if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- if errors.Cause(err) == define.ErrVolumeBeingUsed {
+ if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
+ if errors.Is(err, define.ErrVolumeBeingUsed) {
// Ignore error, since podman will report original error
volumesFrom, _ := c.volumesFrom()
if len(volumesFrom) > 0 {
@@ -891,7 +891,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
c := new(Container)
c.config, err = r.state.GetContainerConfig(id)
if err != nil {
- return id, errors.Wrapf(err, "failed to retrieve config for ctr ID %q", id)
+ return id, fmt.Errorf("failed to retrieve config for ctr ID %q: %w", id, err)
}
c.state = new(ContainerState)

@@ -903,7 +903,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
- return id, errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
}

// Lock the pod while we're removing container
@@ -918,7 +918,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return "", err
}
if c.ID() == infraID {
- return id, errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+ return id, fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
}
}

@@ -1115,7 +1115,7 @@ func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error)
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
- return ctrs, errors.Wrapf(err, "unable to look up container %s", inputContainer)
+ return ctrs, fmt.Errorf("unable to look up container %s: %w", inputContainer, err)
}
ctrs = append(ctrs, ctr)
}
@@ -1128,7 +1128,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
var lastCreatedTime time.Time
ctrs, err := r.GetAllContainers()
if err != nil {
- return nil, errors.Wrapf(err, "unable to find latest container")
+ return nil, fmt.Errorf("unable to find latest container: %w", err)
}
if len(ctrs) == 0 {
return nil, define.ErrNoSuchCtr
@@ -1209,7 +1209,7 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.Pru
// MountStorageContainer mounts the storage container's root filesystem
func (r *Runtime) MountStorageContainer(id string) (string, error) {
if _, err := r.GetContainer(id); err == nil {
- return "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1217,7 +1217,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
}
mountPoint, err := r.store.Mount(container.ID, "")
if err != nil {
- return "", errors.Wrapf(err, "error mounting storage for container %s", id)
+ return "", fmt.Errorf("error mounting storage for container %s: %w", id, err)
}
return mountPoint, nil
}
@@ -1225,7 +1225,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
// UnmountStorageContainer unmounts the storage container's root filesystem
func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
if _, err := r.GetContainer(id); err == nil {
- return false, errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}
container, err := r.store.Container(id)
if err != nil {
@@ -1239,7 +1239,7 @@ func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
var path string
if _, err := r.GetContainer(id); err == nil {
- return false, "", errors.Wrapf(define.ErrCtrExists, "ctr %s is a libpod container", id)
+ return false, "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
}

mountCnt, err := r.storageService.MountedContainerImage(id)
@@ -1265,13 +1265,13 @@ func (r *Runtime) StorageContainers() ([]storage.Container, error) {

storeContainers, err := r.store.Containers()
if err != nil {
- return nil, errors.Wrapf(err, "error reading list of all storage containers")
+ return nil, fmt.Errorf("error reading list of all storage containers: %w", err)
}
retCtrs := []storage.Container{}
for _, container := range storeContainers {
exists, err := r.state.HasContainer(container.ID)
if err != nil && err != define.ErrNoSuchCtr {
- return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID)
+ return nil, fmt.Errorf("failed to check if %s container exists in database: %w", container.ID, err)
}
if exists {
continue

@@ -2,6 +2,8 @@ package libpod

import (
"context"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -13,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -40,14 +41,14 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
- return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
} else {
if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil {
- return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
+ return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
}
}
@@ -106,7 +107,7 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
func DownloadFromFile(reader *os.File) (string, error) {
outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
if err != nil {
- return "", errors.Wrap(err, "error creating file")
+ return "", fmt.Errorf("error creating file: %w", err)
}
defer outFile.Close()

@@ -114,7 +115,7 @@ func DownloadFromFile(reader *os.File) (string, error) {

_, err = io.Copy(outFile, reader)
if err != nil {
- return "", errors.Wrapf(err, "error saving %s to %s", reader.Name(), outFile.Name())
+ return "", fmt.Errorf("error saving %s to %s: %w", reader.Name(), outFile.Name(), err)
}

return outFile.Name(), nil

@@ -14,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -22,21 +21,21 @@ func (r *Runtime) stopPauseProcess() error {
if rootless.IsRootless() {
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(r.config.Engine.TmpDir)
if err != nil {
- return errors.Wrapf(err, "could not get pause process pid file path")
+ return fmt.Errorf("could not get pause process pid file path: %w", err)
}
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrap(err, "cannot read pause process pid file")
+ return fmt.Errorf("cannot read pause process pid file: %w", err)
}
pausePid, err := strconv.Atoi(string(data))
if err != nil {
- return errors.Wrapf(err, "cannot parse pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot parse pause pid file %s: %w", pausePidPath, err)
}
if err := os.Remove(pausePidPath); err != nil {
- return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
+ return fmt.Errorf("cannot delete pause pid file %s: %w", pausePidPath, err)
}
if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
return err
@@ -60,7 +59,7 @@ func (r *Runtime) migrate() error {
for _, ctr := range runningContainers {
fmt.Printf("stopped %s\n", ctr.ID())
if err := ctr.Stop(); err != nil {
- return errors.Wrapf(err, "cannot stop container %s", ctr.ID())
+ return fmt.Errorf("cannot stop container %s: %w", ctr.ID(), err)
}
}

@@ -68,7 +67,7 @@ func (r *Runtime) migrate() error {
runtimeChangeRequested := r.migrateRuntime != ""
requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
if !runtimeExists && runtimeChangeRequested {
- return errors.Wrapf(define.ErrInvalidArg, "change to runtime %q requested but no such runtime is defined", r.migrateRuntime)
+ return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
}

for _, ctr := range allCtrs {
@@ -93,7 +92,7 @@ func (r *Runtime) migrate() error {

if needsWrite {
if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
- return errors.Wrapf(err, "error rewriting config for container %s", ctr.ID())
+ return fmt.Errorf("error rewriting config for container %s: %w", ctr.ID(), err)
}
}
}

@@ -2,11 +2,12 @@ package libpod

import (
"context"
+ "errors"
+ "fmt"
"time"

"github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
- "github.com/pkg/errors"
)

// Contains the public Runtime API for pods
@@ -112,7 +113,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
var lastCreatedTime time.Time
pods, err := r.GetAllPods()
if err != nil {
- return nil, errors.Wrapf(err, "unable to get all pods")
+ return nil, fmt.Errorf("unable to get all pods: %w", err)
}
if len(pods) == 0 {
return nil, define.ErrNoSuchPod
@@ -146,7 +147,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
pods = append(pods, c.PodID())
pod, err := r.GetPod(c.PodID())
if err != nil {
- if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
+ if errors.Is(err, define.ErrPodRemoved) || errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err

@@ -5,6 +5,7 @@ package libpod

import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -18,7 +19,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
runcconfig "github.com/opencontainers/runc/libcontainer/configs"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -38,14 +38,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option

for _, option := range options {
if err := option(pod); err != nil {
- return nil, errors.Wrapf(err, "error running pod create option")
+ return nil, fmt.Errorf("error running pod create option: %w", err)
}
}

// Allocate a lock for the pod
lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error allocating lock for new pod")
+ return nil, fmt.Errorf("error allocating lock for new pod: %w", err)
}
pod.lock = lock
pod.config.LockID = pod.lock.ID()
@@ -70,7 +70,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+ return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
@@ -108,14 +108,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
pod.config.CgroupParent = SystemdDefaultCgroupParent
}
} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
- return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+ return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
}
// If we are set to use pod cgroups, set the cgroup parent that
// all containers in the pod will share
if pod.config.UsePodCgroup {
cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.InfraContainerSpec.ResourceLimits)
if err != nil {
- return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
+ return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
}
pod.state.CgroupPath = cgroupPath
if p.InfraContainerSpec != nil {
@@ -123,7 +123,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}
}
default:
- return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager)
+ return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
}
}

@@ -132,7 +132,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
}

if !pod.HasInfraContainer() && pod.SharesNamespaces() {
- return nil, errors.Errorf("Pods must have an infra container to share namespaces")
+ return nil, errors.New("Pods must have an infra container to share namespaces")
}
if pod.HasInfraContainer() && !pod.SharesNamespaces() {
logrus.Infof("Pod has an infra container, but shares no namespaces")
@@ -157,12 +157,12 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
if addPodErr = r.state.AddPod(pod); addPodErr == nil {
return pod, nil
}
- if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) {
+ if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
break
}
}
if addPodErr != nil {
- return nil, errors.Wrapf(addPodErr, "error adding pod to state")
+ return nil, fmt.Errorf("error adding pod to state: %w", addPodErr)
}

return pod, nil
@@ -211,7 +211,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
force = true
}
if !removeCtrs && numCtrs > 0 {
- return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+ return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}

// Go through and lock all containers so we can operate on them all at
@@ -239,7 +239,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,

// Ensure state appropriate for removal
if err := ctr.checkReadyForRemoval(); err != nil {
- return errors.Wrapf(err, "pod %s has containers that are not ready to be removed", p.ID())
+ return fmt.Errorf("pod %s has containers that are not ready to be removed: %w", p.ID(), err)
}
}

@@ -311,7 +311,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,

for volName := range ctrNamedVolumes {
volume, err := r.state.Volume(volName)
- if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
+ if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
logrus.Errorf("Retrieving volume %s: %v", volName, err)
continue
}
@@ -319,7 +319,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
continue
}
if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
- if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
+ if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
continue
}
logrus.Errorf("Removing volume %s: %v", volName, err)
@@ -340,7 +340,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
case config.SystemdCgroupsManager:
if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -354,7 +354,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -362,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err = conmonCgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
@@ -371,7 +371,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
cgroup, err := cgroups.Load(p.state.CgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -379,7 +379,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if err == nil {
if err := cgroup.Delete(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
+ removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
} else {
logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
@@ -390,7 +390,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// keep going so we make sure to evict the pod before
// ending up with an inconsistent state.
if removalErr == nil {
- removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.Engine.CgroupManager, p.ID())
+ removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
} else {
logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
}
@@ -416,7 +416,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Deallocate the pod lock
if err := p.lock.Free(); err != nil {
if removalErr == nil {
- removalErr = errors.Wrapf(err, "error freeing pod %s lock", p.ID())
|
||||
removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err)
|
||||
} else {
|
||||
logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
package libpod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/events"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// renumberLocks reassigns lock numbers for all containers and pods in the
|
||||
@ -26,7 +27,7 @@ func (r *Runtime) renumberLocks() error {
|
||||
for _, ctr := range allCtrs {
|
||||
lock, err := r.lockManager.AllocateLock()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
|
||||
return fmt.Errorf("error allocating lock for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
ctr.config.LockID = lock.ID()
|
||||
@ -43,7 +44,7 @@ func (r *Runtime) renumberLocks() error {
|
||||
for _, pod := range allPods {
|
||||
lock, err := r.lockManager.AllocateLock()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
|
||||
return fmt.Errorf("error allocating lock for pod %s: %w", pod.ID(), err)
|
||||
}
|
||||
|
||||
pod.config.LockID = lock.ID()
|
||||
@ -60,7 +61,7 @@ func (r *Runtime) renumberLocks() error {
|
||||
for _, vol := range allVols {
|
||||
lock, err := r.lockManager.AllocateLock()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name())
|
||||
return fmt.Errorf("error allocating lock for volume %s: %w", vol.Name(), err)
|
||||
}
|
||||
|
||||
vol.config.LockID = lock.ID()
|
||||
|
@ -2,11 +2,11 @@ package libpod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/libpod/events"
|
||||
"github.com/containers/podman/v4/pkg/domain/entities/reports"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Contains the public Runtime API for volumes
|
||||
@ -133,7 +133,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter)
|
||||
report.Id = vol.Name()
|
||||
var timeout *uint
|
||||
if err := r.RemoveVolume(ctx, vol, false, timeout); err != nil {
|
||||
if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
|
||||
if !errors.Is(err, define.ErrVolumeBeingUsed) && !errors.Is(err, define.ErrVolumeRemoved) {
|
||||
report.Err = err
|
||||
} else {
|
||||
// We didn't remove the volume for some reason
|
||||
|
@ -5,6 +5,7 @@ package libpod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -17,7 +18,6 @@ import (
|
||||
"github.com/containers/storage/drivers/quota"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
pluginapi "github.com/docker/go-plugins-helpers/volume"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -36,7 +36,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
volume := newVolume(r)
|
||||
for _, option := range options {
|
||||
if err := option(volume); err != nil {
|
||||
return nil, errors.Wrapf(err, "running volume create option")
|
||||
return nil, fmt.Errorf("running volume create option: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -51,17 +51,17 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
// Check if volume with given name exists.
|
||||
exists, err := r.state.HasVolume(volume.config.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "checking if volume with name %s exists", volume.config.Name)
|
||||
return nil, fmt.Errorf("checking if volume with name %s exists: %w", volume.config.Name, err)
|
||||
}
|
||||
if exists {
|
||||
return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name)
|
||||
return nil, fmt.Errorf("volume with name %s already exists: %w", volume.config.Name, define.ErrVolumeExists)
|
||||
}
|
||||
|
||||
// Plugin can be nil if driver is local, but that's OK - superfluous
|
||||
// assignment doesn't hurt much.
|
||||
plugin, err := r.getVolumePlugin(volume.config.Driver)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "volume %s uses volume plugin %s but it could not be retrieved", volume.config.Name, volume.config.Driver)
|
||||
return nil, fmt.Errorf("volume %s uses volume plugin %s but it could not be retrieved: %w", volume.config.Name, volume.config.Driver, err)
|
||||
}
|
||||
volume.plugin = plugin
|
||||
|
||||
@ -73,13 +73,13 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
case "device":
|
||||
if strings.ToLower(volume.config.Options["type"]) == "bind" {
|
||||
if _, err := os.Stat(val); err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid volume option %s for driver 'local'", key)
|
||||
return nil, fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err)
|
||||
}
|
||||
}
|
||||
case "o", "type", "uid", "gid", "size", "inodes", "noquota", "copy", "nocopy":
|
||||
// Do nothing, valid keys
|
||||
default:
|
||||
return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key)
|
||||
return nil, fmt.Errorf("invalid mount option %s for driver 'local': %w", key, define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -99,17 +99,17 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
// Create the mountpoint of this volume
|
||||
volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name)
|
||||
if err := os.MkdirAll(volPathRoot, 0700); err != nil {
|
||||
return nil, errors.Wrapf(err, "creating volume directory %q", volPathRoot)
|
||||
return nil, fmt.Errorf("creating volume directory %q: %w", volPathRoot, err)
|
||||
}
|
||||
if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
|
||||
return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID)
|
||||
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
|
||||
}
|
||||
fullVolPath := filepath.Join(volPathRoot, "_data")
|
||||
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
|
||||
return nil, errors.Wrapf(err, "creating volume directory %q", fullVolPath)
|
||||
return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
|
||||
}
|
||||
if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
|
||||
return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID)
|
||||
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
|
||||
}
|
||||
if err := LabelVolumePath(fullVolPath); err != nil {
|
||||
return nil, err
|
||||
@ -134,7 +134,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
}
|
||||
if projectQuotaSupported {
|
||||
if err := q.SetQuota(fullVolPath, quota); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath)
|
||||
return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -144,7 +144,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
|
||||
lock, err := r.lockManager.AllocateLock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "allocating lock for new volume")
|
||||
return nil, fmt.Errorf("allocating lock for new volume: %w", err)
|
||||
}
|
||||
volume.lock = lock
|
||||
volume.config.LockID = volume.lock.ID()
|
||||
@ -161,7 +161,7 @@ func (r *Runtime) newVolume(noCreatePluginVolume bool, options ...VolumeCreateOp
|
||||
|
||||
// Add the volume to state
|
||||
if err := r.state.AddVolume(volume); err != nil {
|
||||
return nil, errors.Wrapf(err, "adding volume to state")
|
||||
return nil, fmt.Errorf("adding volume to state: %w", err)
|
||||
}
|
||||
defer volume.newVolumeEvent(events.Create)
|
||||
return volume, nil
|
||||
@ -272,7 +272,7 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin
|
||||
createReq.Name = name
|
||||
createReq.Options = options
|
||||
if err := plugin.CreateVolume(createReq); err != nil {
|
||||
return errors.Wrapf(err, "creating volume %q in plugin %s", name, plugin.Name)
|
||||
return fmt.Errorf("creating volume %q in plugin %s: %w", name, plugin.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -305,7 +305,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
if len(deps) != 0 {
|
||||
depsStr := strings.Join(deps, ", ")
|
||||
if !force {
|
||||
return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
|
||||
return fmt.Errorf("volume %s is being used by the following container(s): %s: %w", v.Name(), depsStr, define.ErrVolumeBeingUsed)
|
||||
}
|
||||
|
||||
// We need to remove all containers using the volume
|
||||
@ -314,17 +314,17 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
if err != nil {
|
||||
// If the container's removed, no point in
|
||||
// erroring.
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
|
||||
continue
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "removing container %s that depends on volume %s", dep, v.Name())
|
||||
return fmt.Errorf("removing container %s that depends on volume %s: %w", dep, v.Name(), err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())
|
||||
|
||||
if err := r.removeContainer(ctx, ctr, force, false, false, timeout); err != nil {
|
||||
return errors.Wrapf(err, "removing container %s that depends on volume %s", ctr.ID(), v.Name())
|
||||
return fmt.Errorf("removing container %s that depends on volume %s: %w", ctr.ID(), v.Name(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -337,7 +337,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
// them.
|
||||
logrus.Errorf("Unmounting volume %s: %v", v.Name(), err)
|
||||
} else {
|
||||
return errors.Wrapf(err, "unmounting volume %s", v.Name())
|
||||
return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -353,7 +353,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
// Do we have a volume driver?
|
||||
if v.plugin == nil {
|
||||
canRemove = false
|
||||
removalErr = errors.Wrapf(define.ErrMissingPlugin, "cannot remove volume %s from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
|
||||
removalErr = fmt.Errorf("cannot remove volume %s from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), define.ErrMissingPlugin)
|
||||
} else {
|
||||
// Ping the plugin first to verify the volume still
|
||||
// exists.
|
||||
@ -364,14 +364,14 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
getReq.Name = v.Name()
|
||||
if _, err := v.plugin.GetVolume(getReq); err != nil {
|
||||
canRemove = false
|
||||
removalErr = errors.Wrapf(err, "volume %s could not be retrieved from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
|
||||
removalErr = fmt.Errorf("volume %s could not be retrieved from plugin %s, but it has been removed from Podman: %w", v.Name(), v.Driver(), err)
|
||||
}
|
||||
}
|
||||
if canRemove {
|
||||
req := new(pluginapi.RemoveRequest)
|
||||
req.Name = v.Name()
|
||||
if err := v.plugin.RemoveVolume(req); err != nil {
|
||||
return errors.Wrapf(err, "volume %s could not be removed from plugin %s", v.Name(), v.Driver())
|
||||
return fmt.Errorf("volume %s could not be removed from plugin %s: %w", v.Name(), v.Driver(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -381,13 +381,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
if removalErr != nil {
|
||||
logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
|
||||
}
|
||||
return errors.Wrapf(err, "removing volume %s", v.Name())
|
||||
return fmt.Errorf("removing volume %s: %w", v.Name(), err)
|
||||
}
|
||||
|
||||
// Free the volume's lock
|
||||
if err := v.lock.Free(); err != nil {
|
||||
if removalErr == nil {
|
||||
removalErr = errors.Wrapf(err, "freeing lock for volume %s", v.Name())
|
||||
removalErr = fmt.Errorf("freeing lock for volume %s: %w", v.Name(), err)
|
||||
} else {
|
||||
logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err)
|
||||
}
|
||||
@ -397,7 +397,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo
|
||||
// from /var/lib/containers/storage/volumes
|
||||
if err := v.teardownStorage(); err != nil {
|
||||
if removalErr == nil {
|
||||
removalErr = errors.Wrapf(err, "cleaning up volume storage for %q", v.Name())
|
||||
removalErr = fmt.Errorf("cleaning up volume storage for %q: %w", v.Name(), err)
|
||||
} else {
|
||||
logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err)
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package compat
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
@ -27,7 +28,6 @@ import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/gorilla/schema"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -46,7 +46,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -73,7 +73,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
|
||||
name := utils.GetName(r)
|
||||
reports, err := containerEngine.ContainerRm(r.Context(), []string{name}, options)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
utils.ContainerNotFound(w, name, err)
|
||||
return
|
||||
}
|
||||
@ -83,7 +83,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
if len(reports) > 0 && reports[0].Err != nil {
|
||||
err = reports[0].Err
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
utils.ContainerNotFound(w, name, err)
|
||||
return
|
||||
}
|
||||
@ -110,12 +110,12 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -164,7 +164,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
|
||||
for _, ctnr := range containers {
|
||||
api, err := LibpodToContainer(ctnr, query.Size)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
// container was removed between the initial fetch of the list and conversion
|
||||
logrus.Debugf("Container %s removed between initial fetch and conversion, ignoring in output", ctnr.ID())
|
||||
continue
|
||||
@ -187,7 +187,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -215,7 +215,7 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
|
||||
Signal: "KILL",
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -228,12 +228,12 @@ func KillContainer(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
report, err := containerEngine.ContainerKill(r.Context(), []string{name}, options)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == define.ErrCtrStateInvalid ||
|
||||
errors.Cause(err) == define.ErrCtrStopped {
|
||||
if errors.Is(err, define.ErrCtrStateInvalid) ||
|
||||
errors.Is(err, define.ErrCtrStopped) {
|
||||
utils.Error(w, http.StatusConflict, err)
|
||||
return
|
||||
}
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
utils.ContainerNotFound(w, name, err)
|
||||
return
|
||||
}
|
||||
@ -512,7 +512,7 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
|
||||
for ep := range inspect.HostConfig.PortBindings {
|
||||
splitp := strings.SplitN(ep, "/", 2)
|
||||
if len(splitp) != 2 {
|
||||
return nil, errors.Errorf("PORT/PROTOCOL Format required for %q", ep)
|
||||
return nil, fmt.Errorf("PORT/PROTOCOL Format required for %q", ep)
|
||||
}
|
||||
exposedPort, err := nat.NewPort(splitp[1], splitp[0])
|
||||
if err != nil {
|
||||
@ -616,7 +616,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
|
||||
Name string `schema:"name"`
|
||||
}{}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -627,7 +627,7 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if _, err := runtime.RenameContainer(r.Context(), ctr, query.Name); err != nil {
|
||||
if errors.Cause(err) == define.ErrPodExists || errors.Cause(err) == define.ErrCtrExists {
|
||||
if errors.Is(err, define.ErrPodExists) || errors.Is(err, define.ErrCtrExists) {
|
||||
utils.Error(w, http.StatusConflict, err)
|
||||
return
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package libpod
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
@ -19,7 +20,6 @@ import (
|
||||
"github.com/containers/podman/v4/pkg/specgenutil"
|
||||
"github.com/containers/podman/v4/pkg/util"
|
||||
"github.com/gorilla/schema"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -33,11 +33,11 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
|
||||
)
|
||||
psg := specgen.PodSpecGenerator{InfraContainerSpec: &specgen.SpecGenerator{}}
|
||||
if err := json.NewDecoder(r.Body).Decode(&psg); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
|
||||
return
|
||||
}
|
||||
if !psg.NoInfra {
|
||||
@ -51,17 +51,17 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
err = specgenutil.FillOutSpecGen(psg.InfraContainerSpec, &infraOptions, []string{}) // necessary for default values in many cases (userns, idmappings)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error filling out specgen"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error filling out specgen: %w", err))
|
||||
return
|
||||
}
|
||||
out, err := json.Marshal(psg) // marshal our spec so the matching options can be unmarshaled into infra
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(out, psg.InfraContainerSpec) // unmarhal matching options
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, failedToDecodeSpecgen))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("%v: %w", failedToDecodeSpecgen, err))
|
||||
return
|
||||
}
|
||||
// a few extra that do not have the same json tags
|
||||
@ -75,10 +75,10 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
|
||||
pod, err := generate.MakePod(&podSpecComplete, runtime)
|
||||
if err != nil {
|
||||
httpCode := http.StatusInternalServerError
|
||||
if errors.Cause(err) == define.ErrPodExists {
|
||||
if errors.Is(err, define.ErrPodExists) {
|
||||
httpCode = http.StatusConflict
|
||||
}
|
||||
utils.Error(w, httpCode, errors.Wrap(err, "failed to make pod"))
|
||||
utils.Error(w, httpCode, fmt.Errorf("failed to make pod: %w", err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: pod.ID()})
|
||||
@ -89,7 +89,7 @@ func Pods(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -139,7 +139,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
name := utils.GetName(r)
|
||||
@ -164,7 +164,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
responses, stopError = pod.Stop(r.Context(), false)
|
||||
}
|
||||
if stopError != nil && errors.Cause(stopError) != define.ErrPodPartialFail {
|
||||
if stopError != nil && !errors.Is(stopError, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
@ -178,7 +178,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
report := entities.PodStopReport{Id: pod.ID()}
|
||||
for id, err := range responses {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(err, "error stopping container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error stopping container %s: %w", id, err))
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
@ -207,14 +207,14 @@ func PodStart(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
responses, err := pod.Start(r.Context())
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusConflict, err)
|
||||
return
|
||||
}
|
||||
|
||||
report := entities.PodStartReport{Id: pod.ID()}
|
||||
for id, err := range responses {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(err, "error starting container "+id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("%v: %w", "error starting container "+id, err))
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
@ -237,7 +237,7 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
name := utils.GetName(r)
|
||||
@ -263,14 +263,14 @@ func PodRestart(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
responses, err := pod.Restart(r.Context())
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
report := entities.PodRestartReport{Id: pod.ID()}
|
||||
for id, err := range responses {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(err, "error restarting container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error restarting container %s: %w", id, err))
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
@ -314,14 +314,14 @@ func PodPause(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
responses, err := pod.Pause(r.Context())
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
report := entities.PodPauseReport{Id: pod.ID()}
|
||||
for id, v := range responses {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(v, "error pausing container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error pausing container %s: %w", id, v))
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
@ -340,14 +340,14 @@ func PodUnpause(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
responses, err := pod.Unpause(r.Context())
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
report := entities.PodUnpauseReport{Id: pod.ID()}
|
||||
for id, v := range responses {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(v, "error unpausing container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error unpausing container %s: %w", id, v))
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
@ -374,7 +374,7 @@ func PodTop(w http.ResponseWriter, r *http.Request) {
|
||||
PsArgs: psArgs,
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -456,7 +456,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
|
||||
// override any golang type defaults
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
if _, found := r.URL.Query()["signal"]; found {
|
||||
@ -465,7 +465,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
sig, err := util.ParseSignal(signal)
|
||||
if err != nil {
|
||||
utils.InternalServerError(w, errors.Wrapf(err, "unable to parse signal value"))
|
||||
utils.InternalServerError(w, fmt.Errorf("unable to parse signal value: %w", err))
|
||||
return
|
||||
}
|
||||
name := utils.GetName(r)
|
||||
@ -488,12 +488,12 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
if !hasRunning {
|
||||
utils.Error(w, http.StatusConflict, errors.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
|
||||
utils.Error(w, http.StatusConflict, fmt.Errorf("cannot kill a pod with no running containers: %s", pod.ID()))
|
||||
return
|
||||
}
|
||||
|
||||
responses, err := pod.Kill(r.Context(), uint(sig))
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
utils.Error(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
@ -534,7 +534,7 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
|
||||
// default would go here
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -549,13 +549,12 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
|
||||
reports, err := containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
|
||||
|
||||
// Error checks as documented in swagger.
|
||||
switch errors.Cause(err) {
|
||||
case define.ErrNoSuchPod:
|
||||
utils.Error(w, http.StatusNotFound, err)
|
||||
return
|
||||
case nil:
|
||||
// Nothing to do.
|
||||
default:
|
||||
if err != nil {
|
||||
if errors.Is(err, define.ErrNoSuchPod) {
|
||||
utils.Error(w, http.StatusNotFound, err)
|
||||
return
|
||||
}
|
||||
|
||||
utils.InternalServerError(w, err)
|
||||
return
|
||||
}
|
||||
|
@ -2,9 +2,12 @@ package libpod
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/containers/podman/v4/libpod"
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/pkg/api/handlers/utils"
|
||||
@ -16,7 +19,6 @@ import (
|
||||
"github.com/containers/podman/v4/pkg/domain/infra/abi/parse"
|
||||
"github.com/containers/podman/v4/pkg/util"
|
||||
"github.com/gorilla/schema"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func CreateVolume(w http.ResponseWriter, r *http.Request) {
|
||||
@ -30,14 +32,14 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
input := entities.VolumeCreateOptions{}
|
||||
// decode params from body
|
||||
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -108,7 +110,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
|
||||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
@ -181,7 +183,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
name := utils.GetName(r)
|
||||
@ -191,7 +193,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
if err := runtime.RemoveVolume(r.Context(), vol, query.Force, query.Timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrVolumeBeingUsed {
|
||||
if errors.Is(err, define.ErrVolumeBeingUsed) {
|
||||
utils.Error(w, http.StatusConflict, err)
|
||||
return
|
||||
}
|
||||
|
@ -1,17 +1,18 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/pkg/errorhandling"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrLinkNotSupport = errors.New("Link is not supported")
|
||||
ErrLinkNotSupport = errors.New("link is not supported")
|
||||
)
|
||||
|
||||
// TODO: document the exported functions in this file and make them more
|
||||
@ -25,7 +26,7 @@ func Error(w http.ResponseWriter, code int, err error) {
|
||||
// Log detailed message of what happened to machine running podman service
|
||||
log.Infof("Request Failed(%s): %s", http.StatusText(code), err.Error())
|
||||
em := errorhandling.ErrorModel{
|
||||
Because: (errors.Cause(err)).Error(),
|
||||
Because: errorhandling.Cause(err).Error(),
|
||||
Message: err.Error(),
|
||||
ResponseCode: code,
|
||||
}
|
||||
@ -33,51 +34,50 @@ func Error(w http.ResponseWriter, code int, err error) {
|
||||
}
|
||||
|
||||
func VolumeNotFound(w http.ResponseWriter, name string, err error) {
|
||||
if errors.Cause(err) != define.ErrNoSuchVolume {
|
||||
if !errors.Is(err, define.ErrNoSuchVolume) {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
}
|
||||
|
||||
func ContainerNotFound(w http.ResponseWriter, name string, err error) {
|
||||
switch errors.Cause(err) {
|
||||
case define.ErrNoSuchCtr, define.ErrCtrExists:
|
||||
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrExists) {
|
||||
Error(w, http.StatusNotFound, err)
|
||||
default:
|
||||
} else {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
}
|
||||
|
||||
func ImageNotFound(w http.ResponseWriter, name string, err error) {
|
||||
if errors.Cause(err) != storage.ErrImageUnknown {
|
||||
if !errors.Is(err, storage.ErrImageUnknown) {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
}
|
||||
|
||||
func NetworkNotFound(w http.ResponseWriter, name string, err error) {
|
||||
if errors.Cause(err) != define.ErrNoSuchNetwork {
|
||||
if !errors.Is(err, define.ErrNoSuchNetwork) {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
}
|
||||
|
||||
func PodNotFound(w http.ResponseWriter, name string, err error) {
|
||||
if errors.Cause(err) != define.ErrNoSuchPod {
|
||||
if !errors.Is(err, define.ErrNoSuchPod) {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
}
|
||||
|
||||
func SessionNotFound(w http.ResponseWriter, name string, err error) {
|
||||
if errors.Cause(err) != define.ErrNoSuchExecSession {
|
||||
if !errors.Is(err, define.ErrNoSuchExecSession) {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
}
|
||||
|
||||
func SecretNotFound(w http.ResponseWriter, nameOrID string, err error) {
|
||||
if errors.Cause(err).Error() != "no such secret" {
|
||||
if errorhandling.Cause(err).Error() != "no such secret" {
|
||||
InternalServerError(w, err)
|
||||
}
|
||||
Error(w, http.StatusNotFound, err)
|
||||
@ -92,7 +92,7 @@ func InternalServerError(w http.ResponseWriter, err error) {
|
||||
}
|
||||
|
||||
func BadRequest(w http.ResponseWriter, key string, value string, err error) {
|
||||
e := errors.Wrapf(err, "failed to parse query parameter '%s': %q", key, value)
|
||||
e := fmt.Errorf("failed to parse query parameter '%s': %q: %w", key, value, err)
|
||||
Error(w, http.StatusBadRequest, e)
|
||||
}
|
||||
|
||||
|
@ -231,7 +231,7 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp
|
||||
err = errorhandling.JoinErrors(report.Errors)
|
||||
if err != nil {
|
||||
errModel := errorhandling.ErrorModel{
|
||||
Because: (errors.Cause(err)).Error(),
|
||||
Because: errorhandling.Cause(err).Error(),
|
||||
Message: err.Error(),
|
||||
ResponseCode: response.StatusCode,
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package abi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -32,7 +33,6 @@ import (
|
||||
"github.com/containers/podman/v4/pkg/specgenutil"
|
||||
"github.com/containers/podman/v4/pkg/util"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -80,7 +80,7 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
|
||||
func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string, options entities.ContainerExistsOptions) (*entities.BoolReport, error) {
|
||||
_, err := ic.Libpod.LookupContainer(nameOrID)
|
||||
if err != nil {
|
||||
if errors.Cause(err) != define.ErrNoSuchCtr {
|
||||
if !errors.Is(err, define.ErrNoSuchCtr) {
|
||||
return nil, err
|
||||
}
|
||||
if options.External {
|
||||
@ -120,7 +120,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
|
||||
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
|
||||
for _, c := range ctrs {
|
||||
err := c.Pause()
|
||||
if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
|
||||
if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
|
||||
logrus.Debugf("Container %s is not running", c.ID())
|
||||
continue
|
||||
}
|
||||
@ -137,7 +137,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
|
||||
report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
|
||||
for _, c := range ctrs {
|
||||
err := c.Unpause()
|
||||
if err != nil && options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
|
||||
if err != nil && options.All && errors.Is(err, define.ErrCtrStateInvalid) {
|
||||
logrus.Debugf("Container %s is not paused", c.ID())
|
||||
continue
|
||||
}
|
||||
@ -148,7 +148,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
|
||||
func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
|
||||
names := namesOrIds
|
||||
ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, names, ic.Libpod)
|
||||
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
|
||||
if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
|
||||
return nil, err
|
||||
}
|
||||
ctrMap := map[string]string{}
|
||||
@ -166,13 +166,13 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
|
||||
}
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Cause(err) == define.ErrCtrStopped:
|
||||
case errors.Is(err, define.ErrCtrStopped):
|
||||
logrus.Debugf("Container %s is already stopped", c.ID())
|
||||
case options.All && errors.Cause(err) == define.ErrCtrStateInvalid:
|
||||
case options.All && errors.Is(err, define.ErrCtrStateInvalid):
|
||||
logrus.Debugf("Container %s is not running, could not stop", c.ID())
|
||||
// container never created in OCI runtime
|
||||
// docker parity: do nothing just return container id
|
||||
case errors.Cause(err) == define.ErrCtrStateInvalid:
|
||||
case errors.Is(err, define.ErrCtrStateInvalid):
|
||||
logrus.Debugf("Container %s is either not created on runtime or is in a invalid state", c.ID())
|
||||
default:
|
||||
return err
|
||||
@ -238,7 +238,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
|
||||
reports := make([]*entities.KillReport, 0, len(ctrs))
|
||||
for _, con := range ctrs {
|
||||
err := con.Kill(uint(sig))
|
||||
if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
|
||||
if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
|
||||
logrus.Debugf("Container %s is not running", con.ID())
|
||||
continue
|
||||
}
|
||||
@ -289,8 +289,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
|
||||
return nil
|
||||
}
|
||||
logrus.Debugf("Failed to remove container %s: %s", ctr.ID(), err.Error())
|
||||
switch errors.Cause(err) {
|
||||
case define.ErrNoSuchCtr:
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
// Ignore if the container does not exist (anymore) when either
|
||||
// it has been requested by the user of if the container is a
|
||||
// service one. Service containers are removed along with its
|
||||
@ -301,7 +300,7 @@ func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Cont
|
||||
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
|
||||
return nil
|
||||
}
|
||||
case define.ErrCtrRemoved:
|
||||
} else if errors.Is(err, define.ErrCtrRemoved) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
@ -317,15 +316,15 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
|
||||
for _, ctr := range names {
|
||||
report := reports.RmReport{Id: ctr}
|
||||
report.Err = ic.Libpod.RemoveStorageContainer(ctr, options.Force)
|
||||
switch errors.Cause(report.Err) {
|
||||
case nil:
|
||||
//nolint:gocritic
|
||||
if report.Err == nil {
|
||||
// remove container names that we successfully deleted
|
||||
rmReports = append(rmReports, &report)
|
||||
case define.ErrNoSuchCtr, define.ErrCtrExists:
|
||||
} else if errors.Is(report.Err, define.ErrNoSuchCtr) || errors.Is(report.Err, define.ErrCtrExists) {
|
||||
// There is still a potential this is a libpod container
|
||||
tmpNames = append(tmpNames, ctr)
|
||||
default:
|
||||
if _, err := ic.Libpod.LookupContainer(ctr); errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
} else {
|
||||
if _, err := ic.Libpod.LookupContainer(ctr); errors.Is(err, define.ErrNoSuchCtr) {
|
||||
// remove container failed, but not a libpod container
|
||||
rmReports = append(rmReports, &report)
|
||||
continue
|
||||
@ -337,7 +336,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
|
||||
names = tmpNames
|
||||
|
||||
ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
|
||||
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
|
||||
if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
|
||||
// Failed to get containers. If force is specified, get the containers ID
|
||||
// and evict them
|
||||
if !options.Force {
|
||||
@ -349,7 +348,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
|
||||
report := reports.RmReport{Id: ctr}
|
||||
_, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
|
||||
if err != nil {
|
||||
if options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if options.Ignore && errors.Is(err, define.ErrNoSuchCtr) {
|
||||
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
|
||||
rmReports = append(rmReports, &report)
|
||||
continue
|
||||
@ -426,7 +425,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
|
||||
ctr, err := ic.Libpod.GetLatestContainer()
|
||||
if err != nil {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
return nil, []error{errors.Wrapf(err, "no containers to inspect")}, nil
|
||||
return nil, []error{fmt.Errorf("no containers to inspect: %w", err)}, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -452,7 +451,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
|
||||
// ErrNoSuchCtr is non-fatal, other errors will be
|
||||
// treated as fatal.
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
errs = append(errs, errors.Errorf("no such container %s", name))
|
||||
errs = append(errs, fmt.Errorf("no such container %s", name))
|
||||
continue
|
||||
}
|
||||
return nil, nil, err
|
||||
@ -463,7 +462,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
|
||||
// ErrNoSuchCtr is non-fatal, other errors will be
|
||||
// treated as fatal.
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
errs = append(errs, errors.Errorf("no such container %s", name))
|
||||
errs = append(errs, fmt.Errorf("no such container %s", name))
|
||||
continue
|
||||
}
|
||||
return nil, nil, err
|
||||
@ -487,7 +486,7 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
|
||||
container, err = ic.Libpod.LookupContainer(options.NameOrID)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to look up requested container")
|
||||
return nil, fmt.Errorf("unable to look up requested container: %w", err)
|
||||
}
|
||||
|
||||
// Run Top.
|
||||
@ -512,12 +511,12 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
|
||||
case "oci":
|
||||
mimeType = buildah.OCIv1ImageManifest
|
||||
if len(options.Message) > 0 {
|
||||
return nil, errors.Errorf("messages are only compatible with the docker image format (-f docker)")
|
||||
return nil, fmt.Errorf("messages are only compatible with the docker image format (-f docker)")
|
||||
}
|
||||
case "docker":
|
||||
mimeType = manifest.DockerV2Schema2MediaType
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized image format %q", options.Format)
|
||||
return nil, fmt.Errorf("unrecognized image format %q", options.Format)
|
||||
}
|
||||
|
||||
sc := ic.Libpod.SystemContext()
|
||||
@ -660,7 +659,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
|
||||
// CRImportCheckpoint is expected to import exactly one container from checkpoint image
|
||||
checkpointImageImportErrors = append(
|
||||
checkpointImageImportErrors,
|
||||
errors.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
|
||||
fmt.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err),
|
||||
)
|
||||
} else {
|
||||
containers = append(containers, importedContainers[0])
|
||||
@ -720,16 +719,16 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string,
|
||||
ctr := ctrs[0]
|
||||
conState, err := ctr.State()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
|
||||
return fmt.Errorf("unable to determine state of %s: %w", ctr.ID(), err)
|
||||
}
|
||||
if conState != define.ContainerStateRunning {
|
||||
return errors.Errorf("you can only attach to running containers")
|
||||
return fmt.Errorf("you can only attach to running containers")
|
||||
}
|
||||
|
||||
// If the container is in a pod, also set to recursively start dependencies
|
||||
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false)
|
||||
if err != nil && errors.Cause(err) != define.ErrDetach {
|
||||
return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
|
||||
if err != nil && !errors.Is(err, define.ErrDetach) {
|
||||
return fmt.Errorf("error attaching to container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
os.Stdout.WriteString("\n")
|
||||
return nil
|
||||
@ -751,12 +750,12 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E
|
||||
storageConfig := rt.StorageConfig()
|
||||
runtimeConfig, err := rt.GetConfig()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
|
||||
return nil, fmt.Errorf("error retrieving Libpod configuration to build exec exit command: %w", err)
|
||||
}
|
||||
// TODO: Add some ability to toggle syslog
|
||||
exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, true)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error constructing exit command for exec session")
|
||||
return nil, fmt.Errorf("error constructing exit command for exec session: %w", err)
|
||||
}
|
||||
execConfig.ExitCommand = exitCommandArgs
|
||||
|
||||
@ -774,7 +773,7 @@ func checkExecPreserveFDs(options entities.ExecOptions) error {
|
||||
for _, e := range entries {
|
||||
i, err := strconv.Atoi(e.Name())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
|
||||
return fmt.Errorf("cannot parse %s in /proc/self/fd: %w", e.Name(), err)
|
||||
}
|
||||
m[i] = true
|
||||
}
|
||||
@ -891,7 +890,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
|
||||
|
||||
if options.Attach {
|
||||
err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, !ctrRunning)
|
||||
if errors.Cause(err) == define.ErrDetach {
|
||||
if errors.Is(err, define.ErrDetach) {
|
||||
// User manually detached
|
||||
// Exit cleanly immediately
|
||||
reports = append(reports, &entities.ContainerStartReport{
|
||||
@ -903,7 +902,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
|
||||
return reports, nil
|
||||
}
|
||||
|
||||
if errors.Cause(err) == define.ErrWillDeadlock {
|
||||
if errors.Is(err, define.ErrWillDeadlock) {
|
||||
logrus.Debugf("Deadlock error: %v", err)
|
||||
reports = append(reports, &entities.ContainerStartReport{
|
||||
Id: ctr.ID(),
|
||||
@ -911,7 +910,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
|
||||
Err: err,
|
||||
ExitCode: define.ExitCode(err),
|
||||
})
|
||||
return reports, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
|
||||
return reports, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
|
||||
}
|
||||
|
||||
if ctrRunning {
|
||||
@ -936,7 +935,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
|
||||
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
|
||||
}
|
||||
}
|
||||
return reports, errors.Wrapf(err, "unable to start container %s", ctr.ID())
|
||||
return reports, fmt.Errorf("unable to start container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
exitCode = ic.GetContainerExitCode(ctx, ctr)
|
||||
@ -960,12 +959,12 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
|
||||
}
|
||||
if err := ctr.Start(ctx, true); err != nil {
|
||||
report.Err = err
|
||||
if errors.Cause(err) == define.ErrWillDeadlock {
|
||||
report.Err = errors.Wrapf(err, "please run 'podman system renumber' to resolve deadlocks")
|
||||
if errors.Is(err, define.ErrWillDeadlock) {
|
||||
report.Err = fmt.Errorf("please run 'podman system renumber' to resolve deadlocks: %w", err)
|
||||
reports = append(reports, report)
|
||||
continue
|
||||
}
|
||||
report.Err = errors.Wrapf(err, "unable to start container %q", ctr.ID())
|
||||
report.Err = fmt.Errorf("unable to start container %q: %w", ctr.ID(), err)
|
||||
reports = append(reports, report)
|
||||
if ctr.AutoRemove() {
|
||||
if err := ic.removeContainer(ctx, ctr, entities.RmOptions{}); err != nil {
|
||||
@ -1001,7 +1000,7 @@ func (ic *ContainerEngine) Diff(ctx context.Context, namesOrIDs []string, opts e
if opts.Latest {
ctnr, err := ic.Libpod.GetLatestContainer()
if err != nil {
return nil, errors.Wrap(err, "unable to get latest container")
return nil, fmt.Errorf("unable to get latest container: %w", err)
}
base = ctnr.ID()
}
@ -1064,7 +1063,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
if errors.Cause(err) == define.ErrDetach {
if errors.Is(err, define.ErrDetach) {
report.ExitCode = 0
return &report, nil
}
@ -1074,10 +1073,10 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
}
if errors.Cause(err) == define.ErrWillDeadlock {
if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error on %q: %v", ctr.ID(), err)
report.ExitCode = define.ExitCode(err)
return &report, errors.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
return &report, fmt.Errorf("attempting to start container %s would cause a deadlock; please run 'podman system renumber' to resolve", ctr.ID())
}
report.ExitCode = define.ExitCode(err)
return &report, err
@ -1086,8 +1085,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if opts.Rm && !ctr.ShouldRestart(ctx) {
var timeout *uint
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout); err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr ||
errors.Cause(err) == define.ErrCtrRemoved {
if errors.Is(err, define.ErrNoSuchCtr) ||
errors.Is(err, define.ErrCtrRemoved) {
logrus.Infof("Container %s was already removed, skipping --rm", ctr.ID())
} else {
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
@ -1180,12 +1179,12 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
var timeout *uint
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout)
if err != nil {
report.RmErr = errors.Wrapf(err, "failed to clean up and remove container %v", ctr.ID())
report.RmErr = fmt.Errorf("failed to clean up and remove container %v: %w", ctr.ID(), err)
}
} else {
err := ctr.Cleanup(ctx)
if err != nil {
report.CleanErr = errors.Wrapf(err, "failed to clean up container %v", ctr.ID())
report.CleanErr = fmt.Errorf("failed to clean up container %v: %w", ctr.ID(), err)
}
}

@ -1212,7 +1211,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
err := ctr.Init(ctx, ctr.PodID() != "")

// If we're initializing all containers, ignore invalid state errors
if options.All && errors.Cause(err) == define.ErrCtrStateInvalid {
if options.All && errors.Is(err, define.ErrCtrStateInvalid) {
err = nil
}
report.Err = err
@ -1323,7 +1322,7 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str
if mounted {
report := entities.ContainerUnmountReport{Id: sctr.ID}
if _, report.Err = ic.Libpod.UnmountStorageContainer(sctr.ID, options.Force); report.Err != nil {
if errors.Cause(report.Err) != define.ErrCtrExists {
if !errors.Is(report.Err, define.ErrCtrExists) {
reports = append(reports, &report)
}
} else {
@ -1357,11 +1356,11 @@ func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []str

report := entities.ContainerUnmountReport{Id: ctr.ID()}
if err := ctr.Unmount(options.Force); err != nil {
if options.All && errors.Cause(err) == storage.ErrLayerNotMounted {
if options.All && errors.Is(err, storage.ErrLayerNotMounted) {
logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
continue
}
report.Err = errors.Wrapf(err, "error unmounting container %s", ctr.ID())
report.Err = fmt.Errorf("error unmounting container %s: %w", ctr.ID(), err)
}
reports = append(reports, &report)
}
@ -1410,7 +1409,7 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) {

func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) (statsChan chan entities.ContainerStatsReport, err error) {
if options.Interval < 1 {
return nil, errors.New("Invalid interval, must be a positive number greater zero")
return nil, errors.New("invalid interval, must be a positive number greater zero")
}
if rootless.IsRootless() {
unified, err := cgroups.IsCgroup2UnifiedMode()
@ -1465,19 +1464,18 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri
computeStats := func() ([]define.ContainerStats, error) {
containers, err = containerFunc()
if err != nil {
return nil, errors.Wrapf(err, "unable to get list of containers")
return nil, fmt.Errorf("unable to get list of containers: %w", err)
}

reportStats := []define.ContainerStats{}
for _, ctr := range containers {
stats, err := ctr.GetContainerStats(containerStats[ctr.ID()])
if err != nil {
cause := errors.Cause(err)
if queryAll && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) {
if queryAll && (errors.Is(err, define.ErrCtrRemoved) || errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrStateInvalid)) {
continue
}
if cause == cgroups.ErrCgroupV1Rootless {
err = cause
if errors.Is(err, cgroups.ErrCgroupV1Rootless) {
err = cgroups.ErrCgroupV1Rootless
}
return nil, err
}
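Every change in this file follows the same two substitutions: `errors.Cause(err) == someSentinel` becomes `errors.Is(err, someSentinel)`, and `errors.Wrapf(err, "msg")` becomes `fmt.Errorf("msg: %w", err)`. A minimal sketch of why `errors.Is` is the right replacement once errors are wrapped with `%w` (illustration only, not part of this commit; the sentinel and container ID are invented):

package main

import (
	"errors"
	"fmt"
)

// errNotRunning stands in for a libpod sentinel such as define.ErrCtrStateInvalid.
var errNotRunning = errors.New("container is not running")

func start(id string) error {
	// Wrapping with %w keeps the sentinel reachable through the error chain.
	return fmt.Errorf("unable to start container %s: %w", id, errNotRunning)
}

func main() {
	err := start("abc123")
	fmt.Println(err == errNotRunning)          // false: err is a wrapper around the sentinel
	fmt.Println(errors.Is(err, errNotRunning)) // true: errors.Is walks the %w chain
}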
@ -1,11 +1,11 @@
package errorhandling

import (
"errors"
"os"
"strings"

"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -121,3 +121,22 @@ func (e PodConflictErrorModel) Error() string {
func (e PodConflictErrorModel) Code() int {
return 409
}

// Cause returns the most underlying error for the provided one. There is a
// maximum error depth of 100 to avoid endless loops. An additional error log
// message will be created if this maximum has been reached.
func Cause(err error) (cause error) {
cause = err

const maxDepth = 100
for i := 0; i <= maxDepth; i++ {
res := errors.Unwrap(cause)
if res == nil {
return cause
}
cause = res
}

logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
return cause
}
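A small usage sketch of the new helper (illustration only, not part of the diff; the import path follows the v4 module path used elsewhere in this commit, and the error messages are invented):

package main

import (
	"errors"
	"fmt"

	"github.com/containers/podman/v4/pkg/errorhandling"
)

func main() {
	root := errors.New("no such container")
	wrapped := fmt.Errorf("starting container: %w", fmt.Errorf("lookup failed: %w", root))

	// Cause unwraps the whole %w chain and returns the innermost error,
	// filling the role errors.Cause from github.com/pkg/errors used to play.
	fmt.Println(errorhandling.Cause(wrapped) == root) // true
}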
53
pkg/errorhandling/errorhandling_test.go
Normal file
@ -0,0 +1,53 @@
package errorhandling

import (
"errors"
"fmt"
"testing"

"github.com/stretchr/testify/assert"
)

func TestCause(t *testing.T) {
t.Parallel()

for _, tc := range []struct {
name string
err func() error
expectedErr error
}{
{
name: "nil error",
err: func() error { return nil },
expectedErr: nil,
},
{
name: "equal errors",
err: func() error { return errors.New("foo") },
expectedErr: errors.New("foo"),
},
{
name: "wrapped error",
err: func() error { return fmt.Errorf("baz: %w", fmt.Errorf("bar: %w", errors.New("foo"))) },
expectedErr: errors.New("foo"),
},
{
name: "max depth reached",
err: func() error {
err := errors.New("error")
for i := 0; i <= 101; i++ {
err = fmt.Errorf("%d: %w", i, err)
}
return err
},
expectedErr: fmt.Errorf("0: %w", errors.New("error")),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
err := Cause(tc.err())
assert.Equal(t, tc.expectedErr, err)
})
}
}
@ -16,7 +16,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/storage/pkg/archive"
"github.com/godbus/dbus/v5"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -114,7 +113,7 @@ func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOption
func CreateTarFromSrc(source string, dest string) error {
file, err := os.Create(dest)
if err != nil {
return errors.Wrapf(err, "Could not create tarball file '%s'", dest)
return fmt.Errorf("could not create tarball file '%s': %w", dest, err)
}
defer file.Close()
return TarToFilesystem(source, file)
@ -154,7 +153,7 @@ func RemoveScientificNotationFromFloat(x float64) (float64, error) {
}
result, err := strconv.ParseFloat(bigNum, 64)
if err != nil {
return x, errors.Wrapf(err, "unable to remove scientific number from calculations")
return x, fmt.Errorf("unable to remove scientific number from calculations: %w", err)
}
return result, nil
}
@ -181,11 +180,11 @@ func moveProcessPIDFileToScope(pidPath, slice, scope string) error {
if os.IsNotExist(err) {
return nil
}
return errors.Wrapf(err, "cannot read pid file %s", pidPath)
return fmt.Errorf("cannot read pid file %s: %w", pidPath, err)
}
pid, err := strconv.ParseUint(string(data), 10, 0)
if err != nil {
return errors.Wrapf(err, "cannot parse pid file %s", pidPath)
return fmt.Errorf("cannot parse pid file %s: %w", pidPath, err)
}

return moveProcessToScope(int(pid), slice, scope)
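One check this hunk leaves alone is `os.IsNotExist(err)`, which is fine there because it runs on the raw error before any wrapping. A minimal sketch of the standard-library equivalent once errors are wrapped with `%w`, in the style used elsewhere in this commit (illustration only, not part of the diff; the path is invented):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func readPidFile(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read pid file %s: %w", path, err)
	}
	return data, nil
}

func main() {
	_, err := readPidFile("/nonexistent/podman.pid")
	// os.IsNotExist predates error wrapping and does not unwrap %w chains;
	// errors.Is does, so it still matches after the error has been wrapped.
	fmt.Println(os.IsNotExist(err))             // false on the wrapped error
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}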