Update vendor of containers/storage
Signed-off-by: Doug Rabson <dfr@rabson.org>

go.mod (2 lines changed)

@@ -17,7 +17,7 @@ require (
 	github.com/containers/image/v5 v5.23.1-0.20221101011818-2f770d6d5a0c
 	github.com/containers/ocicrypt v1.1.6
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0
+	github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3

go.sum (4 lines changed)

@@ -281,8 +281,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0 h1:j/+RfR57O5MEr0Irhju/5phfwoU7s3pR0Q5T1dEPeVw=
-github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
+github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8 h1:MrQjgoKVQpD/16sfYe9C3T3y2gLvfBPADMFQ7Oq93zo=
+github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=

vendor/github.com/containers/storage/VERSION (2 lines changed, generated, vendored)

@@ -1 +1 @@
-1.43.1-dev
+1.44.1-dev

vendor/github.com/containers/storage/containers.go (75 lines changed, generated, vendored)

@@ -191,7 +191,7 @@ func (r *containerStore) startWritingWithReload(canReload bool) error {
 	}()
 
 	if canReload {
-		if err := r.reloadIfChanged(true); err != nil {
+		if _, err := r.reloadIfChanged(true); err != nil {
 			return err
 		}
 	}
@@ -215,18 +215,41 @@ func (r *containerStore) stopWriting() {
 // If this succeeds, the caller MUST call stopReading().
 func (r *containerStore) startReading() error {
 	r.lockfile.RLock()
-	succeeded := false
+	unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
 	defer func() {
-		if !succeeded {
-			r.lockfile.Unlock()
+		if unlockFn != nil {
+			unlockFn()
 		}
 	}()
 
-	if err := r.reloadIfChanged(false); err != nil {
-		return err
+	if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
+		if !tryLockedForWriting {
+			return err
+		}
+		unlockFn()
+		unlockFn = nil
+
+		r.lockfile.Lock()
+		unlockFn = r.lockfile.Unlock
+		if _, err := r.load(true); err != nil {
+			return err
+		}
+		unlockFn()
+		unlockFn = nil
+
+		r.lockfile.RLock()
+		unlockFn = r.lockfile.Unlock
+		// We need to check for a reload reload once more because the on-disk state could have been modified
+		// after we released the lock.
+		// If that, _again_, finds inconsistent state, just give up.
+		// We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
+		// shouldn’t be saved (by correct implementations) in the first place.
+		if _, err := r.reloadIfChanged(false); err != nil {
+			return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+		}
 	}
 
-	succeeded = true
+	unlockFn = nil
 	return nil
 }
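
The new startReading follows a lock-upgrade-and-retry pattern: reload under the read lock, and if the reload reports state that only a writer can repair, drop the read lock, repair under the write lock via load(true), then re-take the read lock and check once more. Below is a minimal, self-contained sketch of that pattern (not part of the commit); it uses sync.RWMutex and hypothetical reload/repair helpers in place of the real store types and lock file.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// store stands in for containerStore; reload and repair are hypothetical
// helpers playing the roles of reloadIfChanged(false) and load(true).
type store struct {
	mu      sync.RWMutex
	healthy bool // pretend on-disk state; false means "a writer must fix it"
}

// reload mirrors the new reloadIfChanged contract: (tryLockedForWriting, error).
func (s *store) reload() (bool, error) {
	if !s.healthy {
		return true, errors.New("store is inconsistent; a writer could repair it")
	}
	return false, nil
}

// repair plays the role of load(true); it must run with the write lock held.
func (s *store) repair() { s.healthy = true }

// startReading sketches the upgrade-and-retry dance from the vendored code.
func (s *store) startReading() error {
	s.mu.RLock()
	if tryWrite, err := s.reload(); err != nil {
		if !tryWrite {
			s.mu.RUnlock()
			return err
		}
		// Upgrade: release the read lock and repair under the write lock...
		s.mu.RUnlock()
		s.mu.Lock()
		s.repair()
		s.mu.Unlock()
		// ...then re-take the read lock and check exactly once more.
		s.mu.RLock()
		if _, err := s.reload(); err != nil {
			s.mu.RUnlock()
			return fmt.Errorf("even after cleaning up once: %w", err)
		}
	}
	return nil // on success the caller must eventually release the read lock
}

func main() {
	s := &store{healthy: false}
	if err := s.startReading(); err != nil {
		fmt.Println("startReading failed:", err)
		return
	}
	defer s.mu.RUnlock()
	fmt.Println("reading a consistent view; healthy =", s.healthy)
}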
@@ -239,15 +262,23 @@ func (r *containerStore) stopReading() {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *containerStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// load() with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
 	r.loadMut.Lock()
 	defer r.loadMut.Unlock()
 
 	modified, err := r.lockfile.Modified()
-	if err == nil && modified {
+	if err != nil {
+		return false, err
+	}
+	if modified {
 		return r.load(lockedForWriting)
 	}
-	return err
+	return false, nil
 }
 
 func (r *containerStore) Containers() ([]Container, error) {
@@ -274,24 +305,27 @@ func (r *containerStore) datapath(id, key string) string {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *containerStore) load(lockedForWriting bool) error {
-	needSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *containerStore) load(lockedForWriting bool) (bool, error) {
 	rpath := r.containerspath()
 	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
-		return err
+		return false, err
 	}
 
 	containers := []*Container{}
 	if len(data) != 0 {
 		if err := json.Unmarshal(data, &containers); err != nil {
-			return fmt.Errorf("loading %q: %w", rpath, err)
+			return false, fmt.Errorf("loading %q: %w", rpath, err)
 		}
 	}
 	idlist := make([]string, 0, len(containers))
 	layers := make(map[string]*Container)
 	ids := make(map[string]*Container)
 	names := make(map[string]*Container)
+	var errorToResolveBySaving error // == nil
 	for n, container := range containers {
 		idlist = append(idlist, container.ID)
 		ids[container.ID] = containers[n]
@@ -299,7 +333,7 @@ func (r *containerStore) load(lockedForWriting bool) error {
 		for _, name := range container.Names {
 			if conflict, ok := names[name]; ok {
 				r.removeName(conflict, name)
-				needSave = true
+				errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
 			}
 			names[name] = containers[n]
 		}
@@ -310,14 +344,13 @@ func (r *containerStore) load(lockedForWriting bool) error {
 	r.byid = ids
 	r.bylayer = layers
 	r.byname = names
-	if needSave {
+	if errorToResolveBySaving != nil {
 		if !lockedForWriting {
-			// Eventually, the callers should be modified to retry with a write lock, instead.
-			return errors.New("container store is inconsistent and the current caller does not hold a write lock")
+			return true, errorToResolveBySaving
 		}
-		return r.Save()
+		return false, r.Save()
 	}
-	return nil
+	return false, nil
 }
 
 // Save saves the contents of the store to disk. It should be called with
@@ -358,7 +391,7 @@ func newContainerStore(dir string) (rwContainerStore, error) {
 		return nil, err
 	}
 	defer cstore.stopWriting()
-	if err := cstore.load(true); err != nil {
+	if _, err := cstore.load(true); err != nil {
 		return nil, err
 	}
 	return &cstore, nil
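
In all three stores, the rewritten load() records problems it can only fix by writing (for containers, duplicate names) in errorToResolveBySaving, repairs them in memory, and uses the new (tryLockedForWriting, error) return to tell a read-locked caller that retrying with the write lock would let the repair be persisted. A toy illustration of that decision (not part of the commit; Record and the helper names are made up):

package main

import (
	"fmt"
)

// Record is a hypothetical stand-in for a Container/Image/Layer entry.
type Record struct {
	ID    string
	Names []string
}

// load deduplicates names in memory and reports, via its first return value,
// whether retrying with a write lock could persist that repair -- the same
// (tryLockedForWriting, error) convention the vendored load() now uses.
func load(records []Record, lockedForWriting bool) (bool, error) {
	owners := map[string]string{} // name -> ID that keeps the name
	var errorToResolveBySaving error
	for i := range records {
		var kept []string
		for _, name := range records[i].Names {
			if owner, ok := owners[name]; ok {
				errorToResolveBySaving = fmt.Errorf("name %q already owned by %q", name, owner)
				continue // drop the duplicate in memory
			}
			owners[name] = records[i].ID
			kept = append(kept, name)
		}
		records[i].Names = kept
	}
	if errorToResolveBySaving != nil {
		if !lockedForWriting {
			// The repair cannot be saved without the write lock; ask the caller to retry.
			return true, errorToResolveBySaving
		}
		// A real implementation would call Save() here to persist the deduplicated state.
	}
	return false, nil
}

func main() {
	recs := []Record{
		{ID: "a", Names: []string{"web"}},
		{ID: "b", Names: []string{"web", "db"}},
	}
	if tryAgain, err := load(recs, false); err != nil {
		fmt.Println("read-locked load:", err, "| retry with write lock:", tryAgain)
	}
	_, err := load(recs, true)
	fmt.Println("write-locked load:", err)
}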

vendor/github.com/containers/storage/drivers/quota/projectquota.go (56 lines changed, generated, vendored)

@@ -51,6 +51,7 @@ struct fsxattr {
 */
 import "C"
 import (
+	"errors"
 	"fmt"
 	"math"
 	"os"
@@ -78,6 +79,7 @@ type Control struct {
 	backingFsBlockDev string
 	nextProjectID     uint32
 	quotas            map[string]uint32
+	basePath          string
 }
 
 // Attempt to generate a unigue projectid. Multiple directories
@@ -158,20 +160,22 @@ func NewControl(basePath string) (*Control, error) {
 		Size:   0,
 		Inodes: 0,
 	}
-	if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
-		return nil, err
-	}
 
 	q := Control{
 		backingFsBlockDev: backingFsBlockDev,
 		nextProjectID:     minProjectID + 1,
 		quotas:            make(map[string]uint32),
+		basePath:          basePath,
 	}
 
+	if err := q.setProjectQuota(minProjectID, quota); err != nil {
+		return nil, err
+	}
+
 	//
 	// get first project id to be used for next container
 	//
-	err = q.findNextProjectID(basePath)
+	err = q.findNextProjectID()
 	if err != nil {
 		return nil, err
 	}
@@ -204,11 +208,11 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
 	// set the quota limit for the container's project id
 	//
 	logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
-	return setProjectQuota(q.backingFsBlockDev, projectID, quota)
+	return q.setProjectQuota(projectID, quota)
 }
 
 // setProjectQuota - set the quota for project id on xfs block device
-func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
+func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
 	var d C.fs_disk_quota_t
 	d.d_version = C.FS_DQUOT_VERSION
 	d.d_id = C.__u32(projectID)
@@ -225,15 +229,35 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
 		d.d_ino_softlimit = d.d_ino_hardlimit
 	}
 
-	var cs = C.CString(backingFsBlockDev)
+	var cs = C.CString(q.backingFsBlockDev)
 	defer C.free(unsafe.Pointer(cs))
 
-	_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
-		uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
-		uintptr(unsafe.Pointer(&d)), 0, 0)
-	if errno != 0 {
+	runQuotactl := func() syscall.Errno {
+		_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
+			uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
+			uintptr(unsafe.Pointer(&d)), 0, 0)
+		return errno
+	}
+
+	errno := runQuotactl()
+
+	// If the backingFsBlockDev does not exist any more then try to recreate it.
+	if errors.Is(errno, unix.ENOENT) {
+		if _, err := makeBackingFsDev(q.basePath); err != nil {
+			return fmt.Errorf(
+				"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
+				q.backingFsBlockDev, projectID, err,
+			)
+		}
+
+		if errno := runQuotactl(); errno != 0 {
+			return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
+				projectID, q.backingFsBlockDev, errno)
+		}
+
+	} else if errno != 0 {
 		return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
-			projectID, backingFsBlockDev, errno)
+			projectID, q.backingFsBlockDev, errno)
 	}
 
 	return nil
@@ -332,16 +356,16 @@ func setProjectID(targetPath string, projectID uint32) error {
 
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
-func (q *Control) findNextProjectID(home string) error {
-	files, err := os.ReadDir(home)
+func (q *Control) findNextProjectID() error {
+	files, err := os.ReadDir(q.basePath)
 	if err != nil {
-		return fmt.Errorf("read directory failed : %s", home)
+		return fmt.Errorf("read directory failed : %s", q.basePath)
 	}
 	for _, file := range files {
 		if !file.IsDir() {
 			continue
 		}
-		path := filepath.Join(home, file.Name())
+		path := filepath.Join(q.basePath, file.Name())
 		projid, err := getProjectID(path)
 		if err != nil {
 			return err
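
setProjectQuota now wraps the quotactl syscall in a runQuotactl closure so it can be retried once after recreating a missing backingFsBlockDev. The same recreate-and-retry shape, reduced to plain Go with no cgo (not part of the commit; the op/recreate callbacks and the temp file merely stand in for quotactl and makeBackingFsDev):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// retryAfterRecreate runs op; if it fails because something is missing, it
// calls recreate and retries op exactly once -- the shape setProjectQuota now
// uses when the backingFsBlockDev has disappeared. op and recreate are
// hypothetical callbacks standing in for quotactl and makeBackingFsDev.
func retryAfterRecreate(op func() error, recreate func() error) error {
	err := op()
	if errors.Is(err, os.ErrNotExist) {
		if rerr := recreate(); rerr != nil {
			return fmt.Errorf("recreating missing resource: %w", rerr)
		}
		if err := op(); err != nil {
			return fmt.Errorf("retry after recreation failed: %w", err)
		}
		return nil
	}
	return err
}

func main() {
	path := filepath.Join(os.TempDir(), "example-backing-file")
	op := func() error {
		_, err := os.Stat(path) // stand-in for the quotactl syscall on the device node
		return err
	}
	recreate := func() error {
		f, err := os.Create(path) // stand-in for makeBackingFsDev()
		if err != nil {
			return err
		}
		return f.Close()
	}
	os.Remove(path) // make sure the first attempt fails with "not exist"
	if err := retryAfterRecreate(op, recreate); err != nil {
		fmt.Println("failed:", err)
		return
	}
	fmt.Println("succeeded after recreating", path)
	os.Remove(path)
}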

vendor/github.com/containers/storage/images.go (86 lines changed, generated, vendored)

@@ -208,7 +208,7 @@ func (r *imageStore) startWritingWithReload(canReload bool) error {
 	}()
 
 	if canReload {
-		if err := r.reloadIfChanged(true); err != nil {
+		if _, err := r.reloadIfChanged(true); err != nil {
 			return err
 		}
 	}
@@ -235,20 +235,43 @@ func (r *imageStore) stopWriting() {
 // should use startReading() instead.
 func (r *imageStore) startReadingWithReload(canReload bool) error {
 	r.lockfile.RLock()
-	succeeded := false
+	unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
 	defer func() {
-		if !succeeded {
-			r.lockfile.Unlock()
+		if unlockFn != nil {
+			unlockFn()
 		}
 	}()
 
 	if canReload {
-		if err := r.reloadIfChanged(false); err != nil {
-			return err
+		if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
+			if !tryLockedForWriting {
+				return err
+			}
+			unlockFn()
+			unlockFn = nil
+
+			r.lockfile.Lock()
+			unlockFn = r.lockfile.Unlock
+			if _, err := r.load(true); err != nil {
+				return err
+			}
+			unlockFn()
+			unlockFn = nil
+
+			r.lockfile.RLock()
+			unlockFn = r.lockfile.Unlock
+			// We need to check for a reload reload once more because the on-disk state could have been modified
+			// after we released the lock.
+			// If that, _again_, finds inconsistent state, just give up.
+			// We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
+			// shouldn’t be saved (by correct implementations) in the first place.
+			if _, err := r.reloadIfChanged(false); err != nil {
+				return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+			}
 		}
 	}
 
-	succeeded = true
+	unlockFn = nil
 	return nil
 }
@@ -267,15 +290,23 @@ func (r *imageStore) stopReading() {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *imageStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
 	r.loadMut.Lock()
 	defer r.loadMut.Unlock()
 
 	modified, err := r.lockfile.Modified()
-	if err == nil && modified {
+	if err != nil {
+		return false, err
+	}
+	if modified {
 		return r.load(lockedForWriting)
 	}
-	return err
+	return false, nil
 }
 
 func (r *imageStore) Images() ([]Image, error) {
@@ -342,36 +373,39 @@ func (i *Image) recomputeDigests() error {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *imageStore) load(lockedForWriting bool) error {
-	shouldSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *imageStore) load(lockedForWriting bool) (bool, error) {
 	rpath := r.imagespath()
 	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
-		return err
+		return false, err
 	}
 
 	images := []*Image{}
 	if len(data) != 0 {
 		if err := json.Unmarshal(data, &images); err != nil {
-			return fmt.Errorf("loading %q: %w", rpath, err)
+			return false, fmt.Errorf("loading %q: %w", rpath, err)
 		}
 	}
 	idlist := make([]string, 0, len(images))
 	ids := make(map[string]*Image)
 	names := make(map[string]*Image)
 	digests := make(map[digest.Digest][]*Image)
+	var errorToResolveBySaving error // == nil
 	for n, image := range images {
 		ids[image.ID] = images[n]
 		idlist = append(idlist, image.ID)
 		for _, name := range image.Names {
 			if conflict, ok := names[name]; ok {
 				r.removeName(conflict, name)
-				shouldSave = true
+				errorToResolveBySaving = ErrDuplicateImageNames
 			}
 		}
 		// Compute the digest list.
 		if err := image.recomputeDigests(); err != nil {
-			return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
+			return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
 		}
 		for _, name := range image.Names {
 			names[name] = image
@@ -383,19 +417,23 @@ func (r *imageStore) load(lockedForWriting bool) error {
 		image.ReadOnly = !r.lockfile.IsReadWrite()
 	}
 
-	if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
-		// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
-		return ErrDuplicateImageNames
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, errorToResolveBySaving
+		}
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
 	}
 	r.images = images
 	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
 	r.byid = ids
 	r.byname = names
 	r.bydigest = digests
-	if shouldSave {
-		return r.Save()
+	if errorToResolveBySaving != nil {
+		return false, r.Save()
 	}
-	return nil
+	return false, nil
 }
 
 // Save saves the contents of the store to disk. It should be called with
@@ -439,7 +477,7 @@ func newImageStore(dir string) (rwImageStore, error) {
 		return nil, err
 	}
 	defer istore.stopWriting()
-	if err := istore.load(true); err != nil {
+	if _, err := istore.load(true); err != nil {
 		return nil, err
 	}
 	return &istore, nil
@@ -462,7 +500,7 @@ func newROImageStore(dir string) (roImageStore, error) {
 		return nil, err
 	}
 	defer istore.stopReading()
-	if err := istore.load(false); err != nil {
+	if _, err := istore.load(false); err != nil {
 		return nil, err
 	}
 	return &istore, nil

vendor/github.com/containers/storage/layers.go (142 lines changed, generated, vendored)

@@ -36,6 +36,10 @@ import (
 const (
 	tarSplitSuffix = ".tar-split.gz"
 	incompleteFlag = "incomplete"
+	// maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
+	// in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
+	// until we just give up.
+	maxLayerStoreCleanupIterations = 3
 )
 
 // A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -331,7 +335,7 @@ func (r *layerStore) startWritingWithReload(canReload bool) error {
 	}()
 
 	if canReload {
-		if err := r.reloadIfChanged(true); err != nil {
+		if _, err := r.reloadIfChanged(true); err != nil {
 			return err
 		}
 	}
@@ -358,20 +362,46 @@ func (r *layerStore) stopWriting() {
 // should use startReading() instead.
 func (r *layerStore) startReadingWithReload(canReload bool) error {
 	r.lockfile.RLock()
-	succeeded := false
+	unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
 	defer func() {
-		if !succeeded {
-			r.lockfile.Unlock()
+		if unlockFn != nil {
+			unlockFn()
 		}
 	}()
 
 	if canReload {
-		if err := r.reloadIfChanged(false); err != nil {
-			return err
+		cleanupsDone := 0
+		for {
+			tryLockedForWriting, err := r.reloadIfChanged(false)
+			if err == nil {
+				break
+			}
+			if !tryLockedForWriting {
+				return err
+			}
+			if cleanupsDone >= maxLayerStoreCleanupIterations {
+				return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
+			}
+			unlockFn()
+			unlockFn = nil
+
+			r.lockfile.Lock()
+			unlockFn = r.lockfile.Unlock
+			if _, err := r.load(true); err != nil {
+				return err
+			}
+			unlockFn()
+			unlockFn = nil
+
+			r.lockfile.RLock()
+			unlockFn = r.lockfile.Unlock
+			// We need to check for a reload reload again because the on-disk state could have been modified
+			// after we released the lock.
+			cleanupsDone++
 		}
 	}
 
-	succeeded = true
+	unlockFn = nil
 	return nil
 }
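
Unlike the container and image stores, the layer store retries this cleanup in a loop bounded by the new maxLayerStoreCleanupIterations constant, because every repair briefly drops the read lock and gives other writers a chance to create fresh inconsistencies. A tiny sketch of that bounded check-and-repair loop (not part of the commit; check and repair are illustrative callbacks and locking is omitted):

package main

import (
	"errors"
	"fmt"
)

// maxCleanupIterations mirrors the idea behind maxLayerStoreCleanupIterations:
// bound how often a reader will repair-and-recheck before giving up.
const maxCleanupIterations = 3

// reloadUntilClean retries check/repair at most maxCleanupIterations times.
// check returns (repairable, err); repair is assumed to need exclusive access.
func reloadUntilClean(check func() (bool, error), repair func() error) error {
	cleanupsDone := 0
	for {
		repairable, err := check()
		if err == nil {
			return nil
		}
		if !repairable {
			return err
		}
		if cleanupsDone >= maxCleanupIterations {
			return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
		}
		if err := repair(); err != nil {
			return err
		}
		cleanupsDone++
	}
}

func main() {
	broken := 2 // pretend the state needs two rounds of repair
	check := func() (bool, error) {
		if broken > 0 {
			return true, errors.New("layer store is inconsistent")
		}
		return false, nil
	}
	repair := func() error { broken--; return nil }
	fmt.Println("result:", reloadUntilClean(check, repair))
}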
@@ -424,15 +454,23 @@ func (r *layerStore) Modified() (bool, error) {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *layerStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
 	r.loadMut.Lock()
 	defer r.loadMut.Unlock()
 
 	modified, err := r.Modified()
-	if err == nil && modified {
+	if err != nil {
+		return false, err
+	}
+	if modified {
 		return r.load(lockedForWriting)
 	}
-	return err
+	return false, nil
 }
 
 func (r *layerStore) Layers() ([]Layer, error) {
@@ -455,26 +493,28 @@ func (r *layerStore) layerspath() string {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *layerStore) load(lockedForWriting bool) error {
-	shouldSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 	rpath := r.layerspath()
 	info, err := os.Stat(rpath)
 	if err != nil {
 		if !os.IsNotExist(err) {
-			return err
+			return false, err
 		}
 	} else {
 		r.layerspathModified = info.ModTime()
 	}
 	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
-		return err
+		return false, err
 	}
 
 	layers := []*Layer{}
 	if len(data) != 0 {
 		if err := json.Unmarshal(data, &layers); err != nil {
-			return fmt.Errorf("loading %q: %w", rpath, err)
+			return false, fmt.Errorf("loading %q: %w", rpath, err)
 		}
 	}
 	idlist := make([]string, 0, len(layers))
@@ -482,6 +522,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
 	names := make(map[string]*Layer)
 	compressedsums := make(map[digest.Digest][]string)
 	uncompressedsums := make(map[digest.Digest][]string)
+	var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
 	if r.lockfile.IsReadWrite() {
 		selinux.ClearLabels()
 	}
@@ -491,7 +532,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
 		for _, name := range layer.Names {
 			if conflict, ok := names[name]; ok {
 				r.removeName(conflict, name)
-				shouldSave = true
+				errorToResolveBySaving = ErrDuplicateLayerNames
 			}
 			names[name] = layers[n]
 		}
@@ -505,11 +546,22 @@ func (r *layerStore) load(lockedForWriting bool) error {
 			selinux.ReserveLabel(layer.MountLabel)
 		}
 		layer.ReadOnly = !r.lockfile.IsReadWrite()
+		// The r.lockfile.IsReadWrite() condition maintains past practice:
+		// Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+		// (OTOH creating child layers on top would probably lead to problems?).
+		// We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+		if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+			errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
+		}
 	}
 
-	if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
-		// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
-		return ErrDuplicateLayerNames
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, errorToResolveBySaving
+		}
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
 	}
 	r.layers = layers
 	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
@@ -523,42 +575,42 @@ func (r *layerStore) load(lockedForWriting bool) error {
 		r.mountsLockfile.RLock()
 		defer r.mountsLockfile.Unlock()
 		if err := r.loadMounts(); err != nil {
-			return err
+			return false, err
 		}
 	}
 
-	// Last step: as we’re writable, try to remove anything that a previous
-	// user of this storage area marked for deletion but didn't manage to
-	// actually delete.
-	var incompleteDeletionErrors error // = nil
-	if lockedForWriting {
-		for _, layer := range r.layers {
-			if layer.Flags == nil {
-				layer.Flags = make(map[string]interface{})
-			}
-			if layerHasIncompleteFlag(layer) {
-				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
-				err = r.deleteInternal(layer.ID)
-				if err != nil {
-					// Don't return the error immediately, because deleteInternal does not saveLayers();
-					// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
-					// deleted incomplete layers have their metadata correctly removed.
-					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
-						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
-				}
-				shouldSave = true
-			}
-		}
-	}
-	if shouldSave {
-		if err := r.saveLayers(); err != nil {
-			return err
-		}
-	}
-	if incompleteDeletionErrors != nil {
-		return incompleteDeletionErrors
-	}
-	return nil
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, fmt.Errorf("internal error: layerStore.load has shouldSave but !r.lockfile.IsReadWrite")
+		}
+		// Last step: try to remove anything that a previous
+		// user of this storage area marked for deletion but didn't manage to
+		// actually delete.
+		var incompleteDeletionErrors error // = nil
+		for _, layer := range r.layers {
+			if layer.Flags == nil {
+				layer.Flags = make(map[string]interface{})
+			}
+			if layerHasIncompleteFlag(layer) {
+				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+				err = r.deleteInternal(layer.ID)
+				if err != nil {
+					// Don't return the error immediately, because deleteInternal does not saveLayers();
+					// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+					// deleted incomplete layers have their metadata correctly removed.
+					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
+				}
+			}
+		}
+		if err := r.saveLayers(); err != nil {
+			return false, err
+		}
+		if incompleteDeletionErrors != nil {
+			return false, incompleteDeletionErrors
+		}
+	}
+	return false, nil
 }
 
 func (r *layerStore) loadMounts() error {
@@ -689,7 +741,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
 		return nil, err
 	}
 	defer rlstore.stopWriting()
-	if err := rlstore.load(true); err != nil {
+	if _, err := rlstore.load(true); err != nil {
 		return nil, err
 	}
 	return &rlstore, nil
@@ -714,7 +766,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
 		return nil, err
 	}
 	defer rlstore.stopReading()
-	if err := rlstore.load(false); err != nil {
+	if _, err := rlstore.load(false); err != nil {
 		return nil, err
 	}
 	return &rlstore, nil
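
When the layer store does hold the write lock, the rewritten load() deletes layers still marked with the incomplete flag but keeps going on failure, accumulating errors so that saveLayers() still records the deletions that did succeed. A compact sketch of that accumulate-then-save shape (not part of the commit); it uses the standard library's errors.Join where the vendored code uses hashicorp's multierror.Append, and the layer type and callbacks are made up:

package main

import (
	"errors"
	"fmt"
)

// layer is a hypothetical stand-in for a storage layer record.
type layer struct {
	id         string
	incomplete bool
}

// cleanupIncomplete deletes incomplete layers but keeps going on failure,
// accumulating errors (errors.Join here stands in for multierror.Append)
// so the caller can still save the metadata of everything that was removed.
func cleanupIncomplete(layers []layer, deleteLayer func(id string) error, save func() error) error {
	var deletionErrors error
	for _, l := range layers {
		if !l.incomplete {
			continue
		}
		fmt.Printf("found incomplete layer %q, deleting it\n", l.id)
		if err := deleteLayer(l.id); err != nil {
			// Don't return yet: later deletions and the final save should still run.
			deletionErrors = errors.Join(deletionErrors, fmt.Errorf("deleting layer %q: %w", l.id, err))
		}
	}
	if err := save(); err != nil {
		return err
	}
	return deletionErrors
}

func main() {
	layers := []layer{{"aaa", false}, {"bbb", true}, {"ccc", true}}
	deleteLayer := func(id string) error {
		if id == "bbb" {
			return errors.New("device busy")
		}
		return nil
	}
	save := func() error { return nil }
	if err := cleanupIncomplete(layers, deleteLayer, save); err != nil {
		fmt.Println("cleanup finished with errors:", err)
	}
}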

vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go (17 lines added, generated, vendored, new file)

@@ -0,0 +1,17 @@
+package kernel
+
+import "golang.org/x/sys/unix"
+
+// Utsname represents the system name structure.
+// It is passthrough for unix.Utsname in order to make it portable with
+// other platforms where it is not available.
+type Utsname unix.Utsname
+
+func uname() (*unix.Utsname, error) {
+	uts := &unix.Utsname{}
+
+	if err := unix.Uname(uts); err != nil {
+		return nil, err
+	}
+	return uts, nil
+}
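
The new FreeBSD implementation returns the raw unix.Utsname. As a usage sketch (not part of the commit, and only meaningful on platforms where golang.org/x/sys/unix provides Uname), the fixed-size byte-array fields can be turned into strings with unix.ByteSliceToString; the actual kernel-version parsing lives in kernel_unix.go, which this diff does not touch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// main sketches how the Utsname returned by uname() might be consumed.
// unix.ByteSliceToString trims at the first NUL in the fixed-size fields.
func main() {
	uts := &unix.Utsname{}
	if err := unix.Uname(uts); err != nil {
		fmt.Println("uname failed:", err)
		return
	}
	fmt.Println("sysname:", unix.ByteSliceToString(uts.Sysname[:]))
	fmt.Println("release:", unix.ByteSliceToString(uts.Release[:]))
	fmt.Println("machine:", unix.ByteSliceToString(uts.Machine[:]))
}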

vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go (9 lines changed, generated, vendored)

@@ -1,13 +1,14 @@
-//go:build freebsd || openbsd
-// +build freebsd openbsd
+//go:build openbsd
+// +build openbsd
 
 package kernel
 
 import (
-	"errors"
+	"fmt"
+	"runtime"
 )
 
 // A stub called by kernel_unix.go .
 func uname() (*Utsname, error) {
-	return nil, errors.New("Kernel version detection is available only on linux")
+	return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
 }

vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go (4 lines changed, generated, vendored)

@@ -1,5 +1,5 @@
-//go:build !linux && !solaris
-// +build !linux,!solaris
+//go:build !linux && !solaris && !freebsd
+// +build !linux,!solaris,!freebsd
 
 package kernel
 

vendor/modules.txt (2 lines changed, vendored)

@@ -264,7 +264,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0
+# github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
 ## explicit; go 1.17
 github.com/containers/storage
 github.com/containers/storage/drivers