Mirror of https://github.com/containers/podman.git (synced 2025-08-06 19:44:14 +08:00)
Vendor in latest containers/(common,image,storage)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

5  vendor/github.com/containers/common/libnetwork/netavark/config.go  (generated, vendored)

@@ -116,6 +116,11 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
}
// rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it
newNetwork.Options[types.IsolateOption] = strconv.FormatBool(val)
case types.MetricOption:
_, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unsupported bridge network option %s", key)
}
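
The comment in the hunk above is the whole point of the change: Go's strconv.ParseBool accepts "1"/"0" (and a few other spellings) while netavark's Rust-side parser only accepts "true" and "false", so the option value is re-serialized before it is handed to netavark. A minimal stand-alone sketch of that normalization (illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Go happily parses "1" as a boolean...
	val, err := strconv.ParseBool("1")
	if err != nil {
		panic(err)
	}
	// ...so the value is re-serialized with FormatBool, which only ever emits
	// "true" or "false", the two spellings netavark will accept.
	fmt.Println(strconv.FormatBool(val)) // prints: true
}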

1  vendor/github.com/containers/common/libnetwork/types/const.go  (generated, vendored)

@@ -40,6 +40,7 @@ const (
MTUOption = "mtu"
ModeOption = "mode"
IsolateOption = "isolate"
MetricOption = "metric"
)

type NetworkBackend string

13  vendor/github.com/containers/common/pkg/config/config.go  (generated, vendored)

@@ -358,6 +358,9 @@ type EngineConfig struct {
// OCIRuntimes are the set of configured OCI runtimes (default is runc).
OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`

// PlatformToOCIRuntime requests specific OCI runtime for a specified platform of image.
PlatformToOCIRuntime map[string]string `toml:"platform_to_oci_runtime,omitempty"`

// PodExitPolicy determines the behaviour when the last container of a pod exits.
PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"`
@@ -619,6 +622,16 @@ type Destination struct {
IsMachine bool `toml:"is_machine,omitempty"`
}

// Consumes container image's os and arch and returns if any dedicated runtime was
// configured otherwise returns default runtime.
func (c *EngineConfig) ImagePlatformToRuntime(os string, arch string) string {
platformString := os + "/" + arch
if val, ok := c.PlatformToOCIRuntime[platformString]; ok {
return val
}
return c.OCIRuntime
}

// NewConfig creates a new Config. It starts with an empty config and, if
// specified, merges the config at `userConfigPath` path. Depending if we're
// running as root or rootless, we then merge the system configuration followed
// running as root or rootless, we then merge the system configuration followed
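
The new ImagePlatformToRuntime helper keys a map by "os/arch" and falls back to the engine's default runtime. A small usage sketch (assuming the stock config.Default() loader; the platform values are just examples, and a containers.conf on the system could override the built-in mapping):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// "wasi"/"wasm" hits the new default platform_to_oci_runtime mapping and
	// yields "crun-wasm"; an unmapped platform falls back to cfg.Engine.OCIRuntime.
	fmt.Println(cfg.Engine.ImagePlatformToRuntime("wasi", "wasm"))
	fmt.Println(cfg.Engine.ImagePlatformToRuntime("linux", "amd64"))
}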

4  vendor/github.com/containers/common/pkg/config/config_darwin.go  (generated, vendored)

@@ -10,6 +10,10 @@ const (

// DefaultContainersConfig holds the default containers config path
DefaultContainersConfig = "/usr/share/" + _configPath

// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
)

// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations.

4  vendor/github.com/containers/common/pkg/config/config_freebsd.go  (generated, vendored)

@@ -10,6 +10,10 @@ const (

// DefaultContainersConfig holds the default containers config path
DefaultContainersConfig = "/usr/local/share/" + _configPath

// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json"
)

// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations.

4  vendor/github.com/containers/common/pkg/config/config_linux.go  (generated, vendored)

@@ -13,6 +13,10 @@ const (

// DefaultContainersConfig holds the default containers config path
DefaultContainersConfig = "/usr/share/" + _configPath

// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
)

func selinuxEnabled() bool {

4  vendor/github.com/containers/common/pkg/config/config_windows.go  (generated, vendored)

@@ -8,6 +8,10 @@ const (

// DefaultContainersConfig holds the default containers config path
DefaultContainersConfig = "/usr/share/" + _configPath

// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
)

// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.

5  vendor/github.com/containers/common/pkg/config/containers.conf  (generated, vendored)

@@ -263,6 +263,11 @@ default_sysctls = [
# If it is empty or commented out, no volumes will be added
#
#volumes = []
#
#[engine.platform_to_oci_runtime]
#"wasi/wasm" = ["crun-wasm"]
#"wasi/wasm32" = ["crun-wasm"]
#"wasi/wasm64" = ["crun-wasm"]

[secrets]
#driver = "file"

17  vendor/github.com/containers/common/pkg/config/default.go  (generated, vendored)

@@ -149,9 +149,6 @@ const (
DefaultPidsLimit = 2048
// DefaultPullPolicy pulls the image if it does not exist locally.
DefaultPullPolicy = "missing"
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
// DefaultSubnet is the subnet that will be used for the default
// network.
DefaultSubnet = "10.88.0.0/16"
@@ -332,6 +329,15 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
"/bin/crun",
"/run/current-system/sw/bin/crun",
},
"crun-wasm": {
"/usr/bin/crun-wasm",
"/usr/sbin/crun-wasm",
"/usr/local/bin/crun-wasm",
"/usr/local/sbin/crun-wasm",
"/sbin/crun-wasm",
"/bin/crun-wasm",
"/run/current-system/sw/bin/crun-wasm",
},
"runc": {
"/usr/bin/runc",
"/usr/sbin/runc",
@@ -378,6 +384,11 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
"/usr/local/bin/ocijail",
},
}
c.PlatformToOCIRuntime = map[string]string{
"wasi/wasm": "crun-wasm",
"wasi/wasm32": "crun-wasm",
"wasi/wasm64": "crun-wasm",
}
// Needs to be called after populating c.OCIRuntimes.
c.OCIRuntime = c.findRuntime()

2  vendor/github.com/containers/storage/VERSION  (generated, vendored)

@@ -1 +1 @@
1.43.1-dev
1.44.1-dev

75  vendor/github.com/containers/storage/containers.go  (generated, vendored)

@@ -191,7 +191,7 @@ func (r *containerStore) startWritingWithReload(canReload bool) error {
}()

if canReload {
if err := r.reloadIfChanged(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
}
@@ -215,18 +215,41 @@ func (r *containerStore) stopWriting() {
// If this succeeds, the caller MUST call stopReading().
func (r *containerStore) startReading() error {
r.lockfile.RLock()
succeeded := false
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
defer func() {
if !succeeded {
r.lockfile.Unlock()
if unlockFn != nil {
unlockFn()
}
}()

if err := r.reloadIfChanged(false); err != nil {
return err
if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
if !tryLockedForWriting {
return err
}
unlockFn()
unlockFn = nil

r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
return err
}
unlockFn()
unlockFn = nil

r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload reload once more because the on-disk state could have been modified
// after we released the lock.
// If that, _again_, finds inconsistent state, just give up.
// We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
// shouldn’t be saved (by correct implementations) in the first place.
if _, err := r.reloadIfChanged(false); err != nil {
return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
}
}

succeeded = true
unlockFn = nil
return nil
}

@@ -239,15 +262,23 @@ func (r *containerStore) stopReading() {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *containerStore) reloadIfChanged(lockedForWriting bool) error {
//
// If !lockedForWriting and this function fails, the return value indicates whether
// load() with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()

modified, err := r.lockfile.Modified()
if err == nil && modified {
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
}
return err
return false, nil
}

func (r *containerStore) Containers() ([]Container, error) {
@@ -274,24 +305,27 @@ func (r *containerStore) datapath(id, key string) string {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *containerStore) load(lockedForWriting bool) error {
needSave := false
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *containerStore) load(lockedForWriting bool) (bool, error) {
rpath := r.containerspath()
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
return false, err
}

containers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &containers); err != nil {
return fmt.Errorf("loading %q: %w", rpath, err)
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
idlist := make([]string, 0, len(containers))
layers := make(map[string]*Container)
ids := make(map[string]*Container)
names := make(map[string]*Container)
var errorToResolveBySaving error // == nil
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
@@ -299,7 +333,7 @@ func (r *containerStore) load(lockedForWriting bool) error {
for _, name := range container.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
needSave = true
errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
}
names[name] = containers[n]
}
@@ -310,14 +344,13 @@ func (r *containerStore) load(lockedForWriting bool) error {
r.byid = ids
r.bylayer = layers
r.byname = names
if needSave {
if errorToResolveBySaving != nil {
if !lockedForWriting {
// Eventually, the callers should be modified to retry with a write lock, instead.
return errors.New("container store is inconsistent and the current caller does not hold a write lock")
return true, errorToResolveBySaving
}
return r.Save()
return false, r.Save()
}
return nil
return false, nil
}

// Save saves the contents of the store to disk. It should be called with
@@ -358,7 +391,7 @@ func newContainerStore(dir string) (rwContainerStore, error) {
return nil, err
}
defer cstore.stopWriting()
if err := cstore.load(true); err != nil {
if _, err := cstore.load(true); err != nil {
return nil, err
}
return &cstore, nil
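
The rewritten startReading above follows a lock-upgrade pattern: detect an inconsistency under the read lock, repair it under a write lock, then re-acquire the read lock and re-check once. A condensed, self-contained sketch of that pattern (the names and the sync.RWMutex stand-in are illustrative, not the store's real API):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu      sync.RWMutex
	corrupt bool // stands in for "duplicate names found in the on-disk state"
}

// check reports whether the state is inconsistent; with canRepair it fixes it.
func (s *store) check(canRepair bool) (tryWriteLock bool, err error) {
	if !s.corrupt {
		return false, nil
	}
	if !canRepair {
		return true, fmt.Errorf("store is inconsistent")
	}
	s.corrupt = false // repaired while holding the write lock
	return false, nil
}

func (s *store) startReading() error {
	s.mu.RLock()
	if tryWriteLock, err := s.check(false); err != nil {
		if !tryWriteLock {
			s.mu.RUnlock()
			return err
		}
		// Drop the read lock, repair under the write lock, then re-check once
		// under a fresh read lock; if it is still broken, give up.
		s.mu.RUnlock()
		s.mu.Lock()
		_, _ = s.check(true)
		s.mu.Unlock()

		s.mu.RLock()
		if _, err := s.check(false); err != nil {
			s.mu.RUnlock()
			return fmt.Errorf("even after successfully cleaning up once: %w", err)
		}
	}
	return nil // the caller is now reading and must later call s.mu.RUnlock()
}

func main() {
	s := &store{corrupt: true}
	fmt.Println(s.startReading()) // <nil>: the inconsistency was repaired under the write lock
}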

56  vendor/github.com/containers/storage/drivers/quota/projectquota.go  (generated, vendored)

@@ -51,6 +51,7 @@ struct fsxattr {
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
@@ -78,6 +79,7 @@ type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas map[string]uint32
basePath string
}

// Attempt to generate a unigue projectid. Multiple directories
@@ -158,20 +160,22 @@ func NewControl(basePath string) (*Control, error) {
Size: 0,
Inodes: 0,
}
if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
return nil, err
}

q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: make(map[string]uint32),
basePath: basePath,
}

if err := q.setProjectQuota(minProjectID, quota); err != nil {
return nil, err
}

//
// get first project id to be used for next container
//
err = q.findNextProjectID(basePath)
err = q.findNextProjectID()
if err != nil {
return nil, err
}
@@ -204,11 +208,11 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return setProjectQuota(q.backingFsBlockDev, projectID, quota)
return q.setProjectQuota(projectID, quota)
}

// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
@@ -225,15 +229,35 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
d.d_ino_softlimit = d.d_ino_hardlimit
}

var cs = C.CString(backingFsBlockDev)
var cs = C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))

_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
runQuotactl := func() syscall.Errno {
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
return errno
}

errno := runQuotactl()

// If the backingFsBlockDev does not exist any more then try to recreate it.
if errors.Is(errno, unix.ENOENT) {
if _, err := makeBackingFsDev(q.basePath); err != nil {
return fmt.Errorf(
"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
q.backingFsBlockDev, projectID, err,
)
}

if errno := runQuotactl(); errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
projectID, q.backingFsBlockDev, errno)
}

} else if errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
projectID, backingFsBlockDev, errno)
projectID, q.backingFsBlockDev, errno)
}

return nil
@@ -332,16 +356,16 @@ func setProjectID(targetPath string, projectID uint32) error {

// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
files, err := os.ReadDir(home)
func (q *Control) findNextProjectID() error {
files, err := os.ReadDir(q.basePath)
if err != nil {
return fmt.Errorf("read directory failed : %s", home)
return fmt.Errorf("read directory failed : %s", q.basePath)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(home, file.Name())
path := filepath.Join(q.basePath, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
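
The setProjectQuota change above wraps the quotactl call in a closure so it can be retried once after a missing backing device file is recreated. A stand-alone sketch of that retry-once-after-recreate idiom, using an ordinary file in place of the block device (illustrative names and paths, no real quotactl involved):

package main

import (
	"errors"
	"fmt"
	"os"
)

func writeMarker(path string) error {
	f, err := os.OpenFile(path, os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString("quota\n")
	return err
}

func setQuota(path string) error {
	run := func() error { return writeMarker(path) }

	err := run()
	if errors.Is(err, os.ErrNotExist) {
		// The backing file disappeared; recreate it and retry exactly once.
		if err := os.WriteFile(path, nil, 0o600); err != nil {
			return fmt.Errorf("recreating %s: %w", path, err)
		}
		if err := run(); err != nil {
			return fmt.Errorf("after recreation: %w", err)
		}
		return nil
	}
	return err
}

func main() {
	path := "/tmp/backing-dev-stand-in"
	os.Remove(path) // start without the file to force the recreate path
	fmt.Println(setQuota(path))
}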

86  vendor/github.com/containers/storage/images.go  (generated, vendored)

@@ -208,7 +208,7 @@ func (r *imageStore) startWritingWithReload(canReload bool) error {
}()

if canReload {
if err := r.reloadIfChanged(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
}
@@ -235,20 +235,43 @@ func (r *imageStore) stopWriting() {
// should use startReading() instead.
func (r *imageStore) startReadingWithReload(canReload bool) error {
r.lockfile.RLock()
succeeded := false
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
defer func() {
if !succeeded {
r.lockfile.Unlock()
if unlockFn != nil {
unlockFn()
}
}()

if canReload {
if err := r.reloadIfChanged(false); err != nil {
return err
if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
if !tryLockedForWriting {
return err
}
unlockFn()
unlockFn = nil

r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
return err
}
unlockFn()
unlockFn = nil

r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload reload once more because the on-disk state could have been modified
// after we released the lock.
// If that, _again_, finds inconsistent state, just give up.
// We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
// shouldn’t be saved (by correct implementations) in the first place.
if _, err := r.reloadIfChanged(false); err != nil {
return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
}
}
}

succeeded = true
unlockFn = nil
return nil
}

@@ -267,15 +290,23 @@ func (r *imageStore) stopReading() {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *imageStore) reloadIfChanged(lockedForWriting bool) error {
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()

modified, err := r.lockfile.Modified()
if err == nil && modified {
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
}
return err
return false, nil
}

func (r *imageStore) Images() ([]Image, error) {
@@ -342,36 +373,39 @@ func (i *Image) recomputeDigests() error {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *imageStore) load(lockedForWriting bool) error {
shouldSave := false
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *imageStore) load(lockedForWriting bool) (bool, error) {
rpath := r.imagespath()
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
return false, err
}

images := []*Image{}
if len(data) != 0 {
if err := json.Unmarshal(data, &images); err != nil {
return fmt.Errorf("loading %q: %w", rpath, err)
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
idlist := make([]string, 0, len(images))
ids := make(map[string]*Image)
names := make(map[string]*Image)
digests := make(map[digest.Digest][]*Image)
var errorToResolveBySaving error // == nil
for n, image := range images {
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
for _, name := range image.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
shouldSave = true
errorToResolveBySaving = ErrDuplicateImageNames
}
}
// Compute the digest list.
if err := image.recomputeDigests(); err != nil {
return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
}
for _, name := range image.Names {
names[name] = image
@@ -383,19 +417,23 @@ func (r *imageStore) load(lockedForWriting bool) error {
image.ReadOnly = !r.lockfile.IsReadWrite()
}

if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
return ErrDuplicateImageNames
if errorToResolveBySaving != nil {
if !r.lockfile.IsReadWrite() {
return false, errorToResolveBySaving
}
if !lockedForWriting {
return true, errorToResolveBySaving
}
}
r.images = images
r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.byname = names
r.bydigest = digests
if shouldSave {
return r.Save()
if errorToResolveBySaving != nil {
return false, r.Save()
}
return nil
return false, nil
}

// Save saves the contents of the store to disk. It should be called with
@@ -439,7 +477,7 @@ func newImageStore(dir string) (rwImageStore, error) {
return nil, err
}
defer istore.stopWriting()
if err := istore.load(true); err != nil {
if _, err := istore.load(true); err != nil {
return nil, err
}
return &istore, nil
@@ -462,7 +500,7 @@ func newROImageStore(dir string) (roImageStore, error) {
return nil, err
}
defer istore.stopReading()
if err := istore.load(false); err != nil {
if _, err := istore.load(false); err != nil {
return nil, err
}
return &istore, nil

142  vendor/github.com/containers/storage/layers.go  (generated, vendored)

@@ -36,6 +36,10 @@ import (
const (
tarSplitSuffix = ".tar-split.gz"
incompleteFlag = "incomplete"
// maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
// in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
// until we just give up.
maxLayerStoreCleanupIterations = 3
)

// A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -331,7 +335,7 @@ func (r *layerStore) startWritingWithReload(canReload bool) error {
}()

if canReload {
if err := r.reloadIfChanged(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
}
@@ -358,20 +362,46 @@ func (r *layerStore) stopWriting() {
// should use startReading() instead.
func (r *layerStore) startReadingWithReload(canReload bool) error {
r.lockfile.RLock()
succeeded := false
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
defer func() {
if !succeeded {
r.lockfile.Unlock()
if unlockFn != nil {
unlockFn()
}
}()

if canReload {
if err := r.reloadIfChanged(false); err != nil {
return err
cleanupsDone := 0
for {
tryLockedForWriting, err := r.reloadIfChanged(false)
if err == nil {
break
}
if !tryLockedForWriting {
return err
}
if cleanupsDone >= maxLayerStoreCleanupIterations {
return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
}
unlockFn()
unlockFn = nil

r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
return err
}
unlockFn()
unlockFn = nil

r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload reload again because the on-disk state could have been modified
// after we released the lock.
cleanupsDone++
}
}

succeeded = true
unlockFn = nil
return nil
}

@@ -424,15 +454,23 @@ func (r *layerStore) Modified() (bool, error) {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *layerStore) reloadIfChanged(lockedForWriting bool) error {
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()

modified, err := r.Modified()
if err == nil && modified {
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
}
return err
return false, nil
}

func (r *layerStore) Layers() ([]Layer, error) {
@@ -455,26 +493,28 @@ func (r *layerStore) layerspath() string {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
func (r *layerStore) load(lockedForWriting bool) error {
shouldSave := false
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *layerStore) load(lockedForWriting bool) (bool, error) {
rpath := r.layerspath()
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
return err
return false, err
}
} else {
r.layerspathModified = info.ModTime()
}
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
return false, err
}

layers := []*Layer{}
if len(data) != 0 {
if err := json.Unmarshal(data, &layers); err != nil {
return fmt.Errorf("loading %q: %w", rpath, err)
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
idlist := make([]string, 0, len(layers))
@@ -482,6 +522,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
names := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
if r.lockfile.IsReadWrite() {
selinux.ClearLabels()
}
@@ -491,7 +532,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
for _, name := range layer.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
shouldSave = true
errorToResolveBySaving = ErrDuplicateLayerNames
}
names[name] = layers[n]
}
@@ -505,11 +546,22 @@ func (r *layerStore) load(lockedForWriting bool) error {
selinux.ReserveLabel(layer.MountLabel)
}
layer.ReadOnly = !r.lockfile.IsReadWrite()
// The r.lockfile.IsReadWrite() condition maintains past practice:
// Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
// (OTOH creating child layers on top would probably lead to problems?).
// We do remove incomplete layers in read-write stores so that we don’t build on top of them.
if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
}
}

if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
return ErrDuplicateLayerNames
if errorToResolveBySaving != nil {
if !r.lockfile.IsReadWrite() {
return false, errorToResolveBySaving
}
if !lockedForWriting {
return true, errorToResolveBySaving
}
}
r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
@@ -523,42 +575,42 @@ func (r *layerStore) load(lockedForWriting bool) error {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err := r.loadMounts(); err != nil {
return err
return false, err
}
}

// Last step: as we’re writable, try to remove anything that a previous
if errorToResolveBySaving != nil {
if !r.lockfile.IsReadWrite() {
return false, fmt.Errorf("internal error: layerStore.load has shouldSave but !r.lockfile.IsReadWrite")
}
// Last step: try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
var incompleteDeletionErrors error // = nil
if lockedForWriting {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
err = r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
shouldSave = true
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
err = r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
}
}
if shouldSave {
if err := r.saveLayers(); err != nil {
return err
}
if err := r.saveLayers(); err != nil {
return false, err
}
if incompleteDeletionErrors != nil {
return incompleteDeletionErrors
return false, incompleteDeletionErrors
}
}
return nil
return false, nil
}

func (r *layerStore) loadMounts() error {
@@ -689,7 +741,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
return nil, err
}
defer rlstore.stopWriting()
if err := rlstore.load(true); err != nil {
if _, err := rlstore.load(true); err != nil {
return nil, err
}
return &rlstore, nil
@@ -714,7 +766,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
return nil, err
}
defer rlstore.stopReading()
if err := rlstore.load(false); err != nil {
if _, err := rlstore.load(false); err != nil {
return nil, err
}
return &rlstore, nil
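
Unlike containers.go and images.go, the layer store retries its cleanup in a bounded loop, capped by the new maxLayerStoreCleanupIterations constant, because other writers can keep introducing inconsistencies while the reader repairs them. A toy sketch of that bounded retry, without the real locking (illustrative only):

package main

import (
	"errors"
	"fmt"
)

const maxAttempts = 3 // plays the role of maxLayerStoreCleanupIterations

// reload reports whether the state is consistent; a non-nil error with
// repairable == true means "a cleanup pass might fix this".
func reload(pending int) (repairable bool, err error) {
	if pending == 0 {
		return false, nil
	}
	return true, errors.New("layer store is inconsistent")
}

func startReading(pending int) error {
	attempts := 0
	for {
		repairable, err := reload(pending)
		if err == nil {
			return nil
		}
		if !repairable {
			return err
		}
		if attempts >= maxAttempts {
			return fmt.Errorf("(even after %d cleanup attempts:) %w", attempts, err)
		}
		pending-- // one cleanup pass under a write lock in the real code
		attempts++
	}
}

func main() {
	fmt.Println(startReading(2)) // <nil> after two cleanup passes
	fmt.Println(startReading(9)) // gives up after maxAttempts passes
}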

17  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go  (generated, vendored, new file)

@@ -0,0 +1,17 @@
package kernel

import "golang.org/x/sys/unix"

// Utsname represents the system name structure.
// It is passthrough for unix.Utsname in order to make it portable with
// other platforms where it is not available.
type Utsname unix.Utsname

func uname() (*unix.Utsname, error) {
uts := &unix.Utsname{}

if err := unix.Uname(uts); err != nil {
return nil, err
}
return uts, nil
}
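
With this file in place, kernel version detection on FreeBSD goes through the same exported helper as on Linux. A usage sketch (assuming the package's exported GetKernelVersion helper; the printed format depends on the platform):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	v, err := kernel.GetKernelVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // the parsed kernel release of the running system
}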

9  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go  (generated, vendored)

@@ -1,13 +1,14 @@
//go:build freebsd || openbsd
// +build freebsd openbsd
//go:build openbsd
// +build openbsd

package kernel

import (
"errors"
"fmt"
"runtime"
)

// A stub called by kernel_unix.go .
func uname() (*Utsname, error) {
return nil, errors.New("Kernel version detection is available only on linux")
return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
}

4  vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go  (generated, vendored)

@@ -1,5 +1,5 @@
//go:build !linux && !solaris
// +build !linux,!solaris
//go:build !linux && !solaris && !freebsd
// +build !linux,!solaris,!freebsd

package kernel

12  vendor/github.com/onsi/gomega/CHANGELOG.md  (generated, vendored)

@@ -1,3 +1,15 @@
## 1.24.1

### Fixes
- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e]
- fix small typo (#601) [ea0ebe6]

### Maintenance
- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
- fix label-filter in test.yml [d795db6]
- stop running flakey tests and rely on external network dependencies in CI [7133290]

## 1.24.0

### Features

5  vendor/github.com/onsi/gomega/gexec/session.go  (generated, vendored)

@@ -121,7 +121,6 @@ To assert that the command has exited it is more convenient to use the Exit matc

When the process exits because it has received a particular signal, the exit code will be 128+signal-value
(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)

*/
func (s *Session) ExitCode() int {
s.lock.Lock()
@@ -142,9 +141,7 @@ will wait for the command to exit then return the entirety of Out's contents.
Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does.
*/
func (s *Session) Wait(timeout ...interface{}) *Session {
args := []any{s}
args = append(args, timeout...)
EventuallyWithOffset(1, args...).Should(Exit())
EventuallyWithOffset(1, s, timeout...).Should(Exit())
return s
}
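
The Wait change above passes the session and the optional timeout straight through to EventuallyWithOffset instead of packing them into one slice, which keeps the pre-1.24 calling convention working. A usage sketch in a Ginkgo/Gomega test (the command and timeout are arbitrary examples):

package mypackage_test

import (
	"os/exec"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gexec"
)

var _ = Describe("Wait", func() {
	It("waits for the process to exit", func() {
		session, err := gexec.Start(exec.Command("true"), GinkgoWriter, GinkgoWriter)
		Expect(err).NotTo(HaveOccurred())
		// Wait forwards the session plus the timeout to Eventually(...).Should(Exit()).
		session.Wait(5 * time.Second)
		Expect(session.ExitCode()).To(Equal(0))
	})
})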

18  vendor/github.com/onsi/gomega/gomega_dsl.go  (generated, vendored)

@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)

const GOMEGA_VERSION = "1.24.0"
const GOMEGA_VERSION = "1.24.1"

const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -368,9 +368,9 @@ is equivalent to

Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
func Eventually(args ...interface{}) AsyncAssertion {
func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Eventually(args...)
return Default.Eventually(actualOrCtx, args...)
}

// EventuallyWithOffset operates like Eventually but takes an additional
@@ -382,9 +382,9 @@ func Eventually(args ...interface{}) AsyncAssertion {
// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
// the same as `Eventually(...).WithOffset(...).WithTimeout` or
// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
func EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion {
func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.EventuallyWithOffset(offset, args...)
return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}

/*
@@ -402,9 +402,9 @@ Consistently is useful in cases where you want to assert that something *does no

This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
*/
func Consistently(args ...interface{}) AsyncAssertion {
func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Consistently(args...)
return Default.Consistently(actualOrCtx, args...)
}

// ConsistentlyWithOffset operates like Consistently but takes an additional
@@ -413,9 +413,9 @@ func Consistently(args ...interface{}) AsyncAssertion {
//
// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
// optional `WithTimeout` and `WithPolling`.
func ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion {
func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.ConsistentlyWithOffset(offset, args...)
return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}

/*
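
The signature change above makes the first parameter an explicit actualOrCtx: it may be the value or function to poll, or a context used to stop polling, and the remaining variadic arguments keep their old timeout/polling meaning. A small sketch of both call shapes in a Ginkgo/Gomega test (illustrative):

package mypackage_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Eventually signatures", func() {
	It("accepts an actual or a context first", func() {
		ch := make(chan int, 1)
		ch <- 1

		// actual first, then timeout and polling interval, as before:
		Eventually(ch, time.Second, 10*time.Millisecond).Should(Receive())

		// context first, then the value to poll; polling stops when ctx is done:
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		Eventually(ctx, func() bool { return true }).Should(BeTrue())
	})
})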

37  vendor/github.com/onsi/gomega/internal/gomega.go  (generated, vendored)

@@ -2,7 +2,6 @@ package internal

import (
"context"
"fmt"
"time"

"github.com/onsi/gomega/types"
@@ -53,42 +52,38 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter
return NewAssertion(actual, g, offset, extra...)
}

func (g *Gomega) Eventually(args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, args...)
func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
}

func (g *Gomega) EventuallyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, args...)
func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
}

func (g *Gomega) Consistently(args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, args...)
func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
}

func (g *Gomega) ConsistentlyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, args...)
func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
}

func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, args ...interface{}) types.AsyncAssertion {
func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
baseOffset := 3
timeoutInterval := -time.Duration(1)
pollingInterval := -time.Duration(1)
intervals := []interface{}{}
var ctx context.Context
if len(args) == 0 {
g.Fail(fmt.Sprintf("Call to %s is missing a value or function to poll", asyncAssertionType), offset+baseOffset)
return nil
}

actual := args[0]
startingIndex := 1
if _, isCtx := args[0].(context.Context); isCtx && len(args) > 1 {
actual := actualOrCtx
startingIndex := 0
if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
// the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
// this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
if _, err := toDuration(args[1]); err != nil {
ctx = args[0].(context.Context)
actual = args[1]
startingIndex = 2
if _, err := toDuration(args[0]); err != nil {
ctx = actualOrCtx.(context.Context)
actual = args[0]
startingIndex = 1
}
}

8  vendor/github.com/onsi/gomega/types/types.go  (generated, vendored)

@@ -19,11 +19,11 @@ type Gomega interface {
Expect(actual interface{}, extra ...interface{}) Assertion
ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion

Eventually(args ...interface{}) AsyncAssertion
EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion
Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion

Consistently(args ...interface{}) AsyncAssertion
ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion
Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion

SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)

4  vendor/github.com/sylabs/sif/v2/pkg/sif/create.go  (generated, vendored)

@@ -1,4 +1,4 @@
// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
@@ -104,7 +104,7 @@ func (f *FileImage) writeDescriptors() error {
return binary.Write(f.rw, binary.LittleEndian, f.rds)
}

// writeHeader writes the the global header in f to backing storage.
// writeHeader writes the global header in f to backing storage.
func (f *FileImage) writeHeader() error {
if _, err := f.rw.Seek(0, io.SeekStart); err != nil {
return err

5  vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go  (generated, vendored)

@@ -214,6 +214,11 @@ func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) {
}

fp = make([]byte, 20)

if bytes.Equal(s.Entity[:len(fp)], fp) {
return ht, nil, nil // Fingerprint not present.
}

copy(fp, s.Entity[:])

return ht, fp, nil
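
The added check treats an all-zero entity field as "no fingerprint" and returns nil instead of a zero-filled slice. A stand-alone sketch of that idiom (the helper and sizes here are illustrative, not the sif API):

package main

import (
	"bytes"
	"fmt"
)

func fingerprint(entity []byte) []byte {
	zeroFP := make([]byte, 20) // 20 zero bytes, the size of a SHA-1 key fingerprint
	if bytes.Equal(entity[:len(zeroFP)], zeroFP) {
		return nil // fingerprint not present
	}
	fp := make([]byte, 20)
	copy(fp, entity)
	return fp
}

func main() {
	fmt.Println(fingerprint(make([]byte, 24)))                                // nil: all zeros
	fmt.Println(fingerprint(append([]byte{0xde, 0xad}, make([]byte, 22)...))) // a real 20-byte fingerprint
}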