Vendor in latest containers/(common, image, storage)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
Daniel J Walsh
2023-01-05 10:37:32 -05:00
parent 8e80c199d7
commit eea78ec7b3
87 changed files with 4156 additions and 1544 deletions

View File

@ -83,13 +83,26 @@ type Layer struct {
MountLabel string `json:"mountlabel,omitempty"`
// MountPoint is the path where the layer is mounted, or where it was most
// recently mounted. This can change between subsequent Unmount() and
// Mount() calls, so the caller should consult this value after Mount()
// succeeds to find the location of the container's root filesystem.
// recently mounted.
//
// WARNING: This field is a snapshot in time: (except for users inside c/storage that
// hold the mount lock) the true value can change between subsequent
// calls to c/storage API.
//
// Users that need to handle concurrent mount/unmount attempts should not access this
// field at all, and should only use the path returned by .Mount() (and that's only
// assuming no other user will concurrently decide to unmount that mount point).
MountPoint string `json:"-"`
// MountCount is used as a reference count for the container's layer being
// mounted at the mount point.
//
// WARNING: This field is a snapshot in time; (except for users inside c/storage that
// hold the mount lock) the true value can change between subsequent
// calls to c/storage API.
//
// In situations where concurrent mount/unmount attempts can happen, this field
// should not be used for any decisions, maybe apart from heuristic user warnings.
MountCount int `json:"-"`
// Created is the datestamp for when this layer was created. Older
@ -265,8 +278,15 @@ type rwLayerStore interface {
// The mappings used by the container can be specified.
Mount(id string, options drivers.MountOpts) (string, error)
// Unmount unmounts a layer when it is no longer in use.
Unmount(id string, force bool) (bool, error)
// unmount unmounts a layer when it is no longer in use.
// If conditional is set, it will fail with ErrLayerNotMounted if the layer is not mounted (without conditional, the caller is
// making a promise that the layer is actually mounted).
// If force is set, it will physically try to unmount it even if it is mounted multiple times, or even if (!conditional and)
// there are no records of it being mounted in the first place.
// It returns whether the layer was still mounted at the time this function returned.
// WARNING: The return value may already be obsolete by the time it is available
// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
unmount(id string, force bool, conditional bool) (bool, error)
// Mounted returns number of times the layer has been mounted.
Mounted(id string) (int, error)
@ -305,12 +325,17 @@ type rwLayerStore interface {
}
type layerStore struct {
lockfile *lockfile.LockFile
mountsLockfile *lockfile.LockFile
rundir string
jsonPath [numLayerLocationIndex]string
driver drivers.Driver
layerdir string
// The following fields are only set when constructing layerStore, and must never be modified afterwards.
// They are safe to access without any other locking.
lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
rundir string
jsonPath [numLayerLocationIndex]string
layerdir string
inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
// Almost all users should use startReading() or startWriting().
lastWrite lockfile.LastWrite
mountsLastWrite lockfile.LastWrite // Only valid if lockfile.IsReadWrite()
layers []*Layer
@ -320,10 +345,14 @@ type layerStore struct {
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
loadMut sync.Mutex
layerspathsModified [numLayerLocationIndex]time.Time
// FIXME: This field is only set when constructing layerStore, but locking rules of the driver
// interface itself are not documented here.
driver drivers.Driver
}
// The caller must hold r.inProcessLock for reading.
func layerLocation(l *Layer) layerLocations {
if l.volatileStore {
return volatileLayerLocation
@ -364,9 +393,11 @@ func copyLayer(l *Layer) *Layer {
// should use startWriting() instead.
func (r *layerStore) startWritingWithReload(canReload bool) error {
r.lockfile.Lock()
r.inProcessLock.Lock()
succeeded := false
defer func() {
if !succeeded {
r.inProcessLock.Unlock()
r.lockfile.Unlock()
}
}()
@ -389,6 +420,7 @@ func (r *layerStore) startWriting() error {
// stopWriting releases locks obtained by startWriting.
// Locks are released in the reverse order of acquisition: the in-process
// lock first, then the on-filesystem lockfile.
func (r *layerStore) stopWriting() {
	r.inProcessLock.Unlock()
	r.lockfile.Unlock()
}
@ -398,6 +430,13 @@ func (r *layerStore) stopWriting() {
// This is an internal implementation detail of layerStore construction, every other caller
// should use startReading() instead.
func (r *layerStore) startReadingWithReload(canReload bool) error {
// inProcessLocked calls the nested function with r.inProcessLock held for writing.
inProcessLocked := func(fn func() error) error {
r.inProcessLock.Lock()
defer r.inProcessLock.Unlock()
return fn()
}
r.lockfile.RLock()
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
defer func() {
@ -405,36 +444,71 @@ func (r *layerStore) startReadingWithReload(canReload bool) error {
unlockFn()
}
}()
r.inProcessLock.RLock()
unlockFn = r.stopReading
if canReload {
cleanupsDone := 0
for {
tryLockedForWriting, err := r.reloadIfChanged(false)
if err == nil {
break
}
if !tryLockedForWriting {
return err
}
if cleanupsDone >= maxLayerStoreCleanupIterations {
return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
}
unlockFn()
unlockFn = nil
r.lockfile.Lock()
// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
modified, err := r.modified()
if err != nil {
return err
}
if modified {
// We are unlucky, and need to reload.
// NOTE: Multiple goroutines can get to this place approximately simultaneously.
r.inProcessLock.RUnlock()
unlockFn = r.lockfile.Unlock
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
unlockFn()
unlockFn = nil
r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload again because the on-disk state could have been modified
// after we released the lock.
cleanupsDone++
cleanupsDone := 0
for {
// First try reloading with r.lockfile held for reading.
// r.inProcessLock will serialize all goroutines that got here;
// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
var tryLockedForWriting bool
err := inProcessLocked(func() error {
var err error
tryLockedForWriting, err = r.reloadIfChanged(false)
return err
})
if err == nil {
break
}
if !tryLockedForWriting {
return err
}
if cleanupsDone >= maxLayerStoreCleanupIterations {
return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
}
// Not good enough, we need r.lockfile held for writing. So, lets do that.
unlockFn()
unlockFn = nil
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if err := inProcessLocked(func() error {
_, err := r.reloadIfChanged(true)
return err
}); err != nil {
return err
}
unlockFn()
unlockFn = nil
r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload again because the on-disk state could have been modified
// after we released the lock.
cleanupsDone++
}
// NOTE that we hold neither a read nor write inProcessLock at this point. That's fine in ordinary operation, because
// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
// protects us against in-process writers modifying data.
// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
// and 2) access to the in-memory data is not racy;
// but we can't protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
r.inProcessLock.RLock()
}
}
@ -450,13 +524,48 @@ func (r *layerStore) startReading() error {
// stopReading releases locks obtained by startReading.
// It must be called exactly once for every successful startReading call,
// releasing the in-process read lock before the on-filesystem lockfile.
func (r *layerStore) stopReading() {
	r.inProcessLock.RUnlock()
	r.lockfile.Unlock()
}
// modified returns true if the on-disk state (of layers or mounts) has changed (ie if reloadIfChanged may need to modify the store)
//
// Note that unlike containerStore.modified and imageStore.modified, this function is not directly used in layerStore.reloadIfChanged();
// it exists only to help the reader ensure it has fresh enough state.
//
// The caller must hold r.lockfile for reading _or_ writing.
// The caller must hold r.inProcessLock for reading or writing.
func (r *layerStore) modified() (bool, error) {
	// Layer JSON state first: if it changed, we must reload regardless of mount state.
	_, m, err := r.layersModified()
	if err != nil {
		return false, err
	}
	if m {
		return true, nil
	}
	if r.lockfile.IsReadWrite() {
		// This means we get, release, and re-obtain, r.mountsLockfile if we actually need to do any kind of reload.
		// That's a bit expensive, but hopefully most callers will be read-only and see no changes.
		// We can't eliminate these mountsLockfile accesses given the current assumption that Layer objects have _some_ not-very-obsolete
		// mount data. Maybe we can segregate the mount-dependent and mount-independent operations better...
		r.mountsLockfile.RLock()
		defer r.mountsLockfile.Unlock()
		_, m, err := r.mountsModified()
		if err != nil {
			return false, err
		}
		if m {
			return true, nil
		}
	}
	return false, nil
}
// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the
// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful
// reload.
// It should only be called with the lock held.
// The caller must hold r.inProcessLock for reading.
func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
if err != nil {
@ -488,12 +597,11 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// reloadIfChanged() with lockedForWriting could succeed.
func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()
lastWrite, layersModified, err := r.layersModified()
if err != nil {
return false, err
@ -516,11 +624,20 @@ func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
return false, nil
}
// mountsModified returns a lockfile.LastWrite value for the current on-disk mount state,
// and true if that state has changed since r.mountsLastWrite (i.e. if reloadMountsIfChanged
// may need to modify the store).
//
// The caller must hold r.mountsLockfile for reading _or_ writing.
// The caller must hold r.inProcessLock for reading or writing.
func (r *layerStore) mountsModified() (lockfile.LastWrite, bool, error) {
	// Delegate the staleness check to the mounts lockfile, comparing against the
	// last write this store has observed.
	return r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
}
// reloadMountsIfChanged reloads the contents of mountsPath from disk if it is changed.
//
// The caller must hold r.mountsLockFile for reading or writing.
func (r *layerStore) reloadMountsIfChanged() error {
lastWrite, modified, err := r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
lastWrite, modified, err := r.mountsModified()
if err != nil {
return err
}
@ -533,6 +650,7 @@ func (r *layerStore) reloadMountsIfChanged() error {
return nil
}
// Requires startReading or startWriting.
func (r *layerStore) Layers() ([]Layer, error) {
layers := make([]Layer, len(r.layers))
for i := range r.layers {
@ -541,6 +659,7 @@ func (r *layerStore) Layers() ([]Layer, error) {
return layers, nil
}
// Requires startWriting.
func (r *layerStore) GarbageCollect() error {
layers, err := r.driver.ListLayers()
@ -581,6 +700,7 @@ func (r *layerStore) mountspath() string {
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
@ -694,6 +814,11 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
return false, err
}
r.mountsLastWrite = mountsLastWrite
// NOTE: We will release mountsLockfile when this function returns, so unlike most of the layer data, the
// r.layers[].MountPoint, r.layers[].MountCount, and r.bymount values might not reflect
// true on-filesystem state already by the time this function returns.
// Code that needs the state to be accurate must lock r.mountsLockfile again,
// and possibly loadMounts() again.
}
if errorToResolveBySaving != nil {
@ -731,6 +856,8 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
return false, nil
}
// The caller must hold r.mountsLockfile for reading or writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
@ -768,8 +895,9 @@ func (r *layerStore) loadMounts() error {
return err
}
// Save saves the contents of the store to disk. It should be called with
// the lock held, locked for writing.
// save saves the contents of the store to disk.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) save(saveLocations layerLocations) error {
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
@ -779,10 +907,15 @@ func (r *layerStore) save(saveLocations layerLocations) error {
return r.saveMounts()
}
// saveFor saves the contents of the store relevant for modifiedLayer to disk.
// It saves only the JSON location (per layerLocation, the stable or volatile file)
// that the given layer belongs to.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
	return r.save(layerLocation(modifiedLayer))
}
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -826,6 +959,8 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
return nil
}
// The caller must hold r.mountsLockfile for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveMounts() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -887,16 +1022,18 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
rlstore := layerStore{
lockfile: lockFile,
mountsLockfile: mountsLockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
filepath.Join(volatileDir, "volatile-layers.json"),
},
layerdir: layerdir,
byid: make(map[string]*Layer),
byname: make(map[string]*Layer),
bymount: make(map[string]*Layer),
driver: driver,
}
if err := rlstore.startWritingWithReload(false); err != nil {
return nil, err
@ -922,16 +1059,18 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
rlstore := layerStore{
lockfile: lockfile,
mountsLockfile: nil,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
filepath.Join(layerdir, "volatile-layers.json"),
},
layerdir: layerdir,
byid: make(map[string]*Layer),
byname: make(map[string]*Layer),
bymount: make(map[string]*Layer),
driver: driver,
}
if err := rlstore.startReadingWithReload(false); err != nil {
return nil, err
@ -948,6 +1087,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
return &rlstore, nil
}
// Requires startReading or startWriting.
func (r *layerStore) lookup(id string) (*Layer, bool) {
if layer, ok := r.byid[id]; ok {
return layer, ok
@ -960,6 +1100,7 @@ func (r *layerStore) lookup(id string) (*Layer, bool) {
return nil, false
}
// Requires startReading or startWriting.
func (r *layerStore) Size(name string) (int64, error) {
layer, ok := r.lookup(name)
if !ok {
@ -974,6 +1115,7 @@ func (r *layerStore) Size(name string) (int64, error) {
return -1, nil
}
// Requires startWriting.
func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -986,6 +1128,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
return r.saveFor(layer)
}
// Requires startWriting.
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1005,6 +1148,7 @@ func (r *layerStore) Status() ([][2]string, error) {
return r.driver.Status(), nil
}
// Requires startWriting.
func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) {
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, ErrDuplicateID
@ -1061,6 +1205,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
// Requires startWriting.
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1258,32 +1403,41 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
return layer, size, err
}
// Requires startWriting.
func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
return layer, err
}
// Requires startWriting.
func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
}
// Requires startReading or startWriting.
func (r *layerStore) Mounted(id string) (int, error) {
if !r.lockfile.IsReadWrite() {
return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err := r.reloadMountsIfChanged(); err != nil {
return 0, err
}
layer, ok := r.lookup(id)
if !ok {
return 0, ErrLayerUnknown
}
// NOTE: The caller of this function is not holding (currently cannot hold) r.mountsLockfile,
// so the data is necessarily obsolete by the time this function returns. So, we don't even
// try to reload it in this function, we just rely on r.load() that happened during
// r.startReading() or r.startWriting().
return layer.MountCount, nil
}
// Requires startWriting.
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
// LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter
// (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies
// - r.layers[].MountCount (directly and via loadMounts / saveMounts)
// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
// - r.bymount (via loadMounts / saveMounts)
// check whether options include ro option
hasReadOnlyOpt := func(opts []string) bool {
for _, item := range opts {
@ -1343,7 +1497,14 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
return mountpoint, err
}
func (r *layerStore) Unmount(id string, force bool) (bool, error) {
// Requires startWriting.
func (r *layerStore) unmount(id string, force bool, conditional bool) (bool, error) {
// LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter → simpleGetCloser.Close()
// (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies
// - r.layers[].MountCount (directly and via loadMounts / saveMounts)
// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
// - r.bymount (via loadMounts / saveMounts)
if !r.lockfile.IsReadWrite() {
return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
@ -1360,6 +1521,9 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
layer = layerByMount
}
if conditional && layer.MountCount == 0 {
return false, ErrLayerNotMounted
}
if force {
layer.MountCount = 1
}
@ -1379,15 +1543,16 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
return true, err
}
// Requires startReading or startWriting.
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.lockfile.IsReadWrite() {
return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err := r.reloadMountsIfChanged(); err != nil {
return nil, nil, err
}
// We are not checking r.mountsLockfile.Modified() and calling r.loadMounts here because the store
// is only locked for reading = we are not allowed to modify layer data.
// Holding r.mountsLockfile protects us against concurrent mount/unmount operations.
layer, ok := r.lookup(id)
if !ok {
return nil, nil, ErrLayerUnknown
@ -1400,6 +1565,13 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
// We don't know which directories to examine.
return nil, nil, ErrLayerNotMounted
}
// Holding r.mountsLockfile protects us against concurrent mount/unmount operations, but we didn't
// hold it continuously since the time we loaded the mount data; so it's possible the layer
// was unmounted in the meantime, or mounted elsewhere. Treat that as if we were run after the unmount,
// = a missing mount, not a filesystem error.
if _, err := system.Lstat(layer.MountPoint); errors.Is(err, os.ErrNotExist) {
return nil, nil, ErrLayerNotMounted
}
rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap)
if err != nil {
return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err)
@ -1448,10 +1620,12 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
return uids, gids, nil
}
// removeName removes a single name from the layer's Names list, in place.
// Requires startWriting.
func (r *layerStore) removeName(layer *Layer, name string) {
	layer.Names = stringSliceWithoutValue(layer.Names, name)
}
// Requires startWriting.
func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1486,6 +1660,7 @@ func (r *layerStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
// Requires startReading or startWriting.
func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
if key == "" {
return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName)
@ -1497,6 +1672,7 @@ func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
return os.Open(r.datapath(layer.ID, key))
}
// Requires startWriting.
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
@ -1544,6 +1720,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
return nil
}
// Requires startReading or startWriting.
func (r *layerStore) BigDataNames(id string) ([]string, error) {
layer, ok := r.lookup(id)
if !ok {
@ -1552,6 +1729,7 @@ func (r *layerStore) BigDataNames(id string) ([]string, error) {
return copyStringSlice(layer.BigDataNames), nil
}
// Requires startReading or startWriting.
func (r *layerStore) Metadata(id string) (string, error) {
if layer, ok := r.lookup(id); ok {
return layer.Metadata, nil
@ -1559,6 +1737,7 @@ func (r *layerStore) Metadata(id string) (string, error) {
return "", ErrLayerUnknown
}
// Requires startWriting.
func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1575,6 +1754,7 @@ func (r *layerStore) tspath(id string) string {
}
// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
// The caller must hold r.inProcessLock for reading.
func layerHasIncompleteFlag(layer *Layer) bool {
// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
if flagValue, ok := layer.Flags[incompleteFlag]; ok {
@ -1585,6 +1765,7 @@ func layerHasIncompleteFlag(layer *Layer) bool {
return false
}
// Requires startWriting.
func (r *layerStore) deleteInternal(id string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1655,6 +1836,7 @@ func (r *layerStore) deleteInternal(id string) error {
return nil
}
// Requires startWriting.
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
for i, layerID := range layers {
@ -1676,6 +1858,7 @@ func (r *layerStore) deleteInDigestMap(id string) {
}
}
// Requires startWriting.
func (r *layerStore) Delete(id string) error {
layer, ok := r.lookup(id)
if !ok {
@ -1685,17 +1868,13 @@ func (r *layerStore) Delete(id string) error {
// The layer may already have been explicitly unmounted, but if not, we
// should try to clean that up before we start deleting anything at the
// driver level.
mountCount, err := r.Mounted(id)
if err != nil {
return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
return err
for {
_, err := r.unmount(id, false, true)
if err == ErrLayerNotMounted {
break
}
mountCount, err = r.Mounted(id)
if err != nil {
return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
return err
}
}
if err := r.deleteInternal(id); err != nil {
@ -1704,11 +1883,13 @@ func (r *layerStore) Delete(id string) error {
return r.saveFor(layer)
}
// Exists reports whether a layer with the given ID or name is known to the store.
// Requires startReading or startWriting.
func (r *layerStore) Exists(id string) bool {
	_, present := r.lookup(id)
	return present
}
// Requires startReading or startWriting.
func (r *layerStore) Get(id string) (*Layer, error) {
if layer, ok := r.lookup(id); ok {
return copyLayer(layer), nil
@ -1716,6 +1897,7 @@ func (r *layerStore) Get(id string) (*Layer, error) {
return nil, ErrLayerUnknown
}
// Requires startWriting.
func (r *layerStore) Wipe() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -1735,6 +1917,7 @@ func (r *layerStore) Wipe() error {
return nil
}
// Requires startReading or startWriting.
func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) {
var ok bool
toLayer, ok = r.lookup(to)
@ -1759,6 +1942,7 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st
return from, to, fromLayer, toLayer, nil
}
// The caller must hold r.inProcessLock for reading.
func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
if layer == nil {
return &idtools.IDMappings{}
@ -1766,6 +1950,7 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
}
// Requires startReading or startWriting.
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
@ -1784,11 +1969,13 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
return os.Open(filepath.Join(s.path, path))
}
// Close unmounts the layer this getter was created for, by calling unmount
// without force and without the conditional check.
// LOCKING BUG: See the comments in layerStore.Diff
func (s *simpleGetCloser) Close() error {
	_, err := s.r.unmount(s.id, false, false)
	return err
}
}
// LOCKING BUG: See the comments in layerStore.Diff
func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
return getter.DiffGetter(id)
@ -1822,6 +2009,7 @@ func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteClo
err = writeCompressedData(compressor, source)
}
// Requires startReading or startWriting.
func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
var metadata storage.Unpacker
@ -1933,6 +2121,8 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
metadata = storage.NewJSONUnpacker(decompressor)
// LOCKING BUG: With btrfs and zfs graph drivers), this uses r.Mount() and r.unmount() holding layerStore only locked for reading
// but they modify in-memory state.
fgetter, err := r.newFileGetter(to)
if err != nil {
errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
@ -1968,6 +2158,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
return maybeCompressReadCloser(rc)
}
// Requires startReading or startWriting.
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
var fromLayer, toLayer *Layer
from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to)
@ -1977,10 +2168,12 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
}
// ApplyDiff applies the contents of diff to the layer identified by to,
// returning the size reported by applyDiffWithOptions; it delegates to
// applyDiffWithOptions with nil layer options.
// Requires startWriting.
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
	return r.applyDiffWithOptions(to, nil, diff)
}
// Requires startWriting.
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.lockfile.IsReadWrite() {
return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
@ -2127,6 +2320,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
return size, err
}
// Requires (startReading or?) startWriting.
func (r *layerStore) DifferTarget(id string) (string, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
@ -2139,6 +2333,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
return ddriver.DifferTarget(layer.ID)
}
// Requires startWriting.
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
@ -2177,6 +2372,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
// Requires startWriting.
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
@ -2216,6 +2412,7 @@ func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error {
return ddriver.CleanupStagingDirectory(stagingDirectory)
}
// Requires startReading or startWriting.
func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
var layers []Layer
for _, layerID := range m[d] {
@ -2228,10 +2425,12 @@ func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Di
return layers, nil
}
// LayersByCompressedDigest returns the layers recorded in the compressed-digest
// index (r.bycompressedsum) under d.
// Requires startReading or startWriting.
func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.bycompressedsum, d)
}
// LayersByUncompressedDigest returns the layers recorded in the uncompressed-digest
// index (r.byuncompressedsum) under d.
// Requires startReading or startWriting.
func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.byuncompressedsum, d)
}