Remove locks from volumes
I was looking into why we have locks in volumes, and I'm fairly convinced they're unnecessary.

We don't have a state whose accesses we need to guard with locks and syncs. The only real purpose for the lock was to prevent concurrent removal of the same volume.

Looking at the code, concurrent removal ought to be fine with a bit of reordering - one or the other might fail, but we will successfully evict the volume from the state.

Also, remove the 'prune' bool from RemoveVolume. None of our other API functions accept it, and it only served to toggle off more verbose error messages.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
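The reordering argument is easier to see in isolation. The following is a minimal, self-contained Go sketch, not libpod code: `state`, `evict`, and `removeVolume` here are illustrative stand-ins for the state and helpers touched by the diff below. It shows why evicting the volume from shared state before tearing down storage makes concurrent removal tolerable: only one caller wins the eviction, and the loser returns cleanly.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

// state is a stand-in for a database-backed state: a map of volume names
// guarded by a mutex.
type state struct {
	mu      sync.Mutex
	volumes map[string]string // name -> mount path
}

// evict removes the volume from the state and reports whether this caller
// was the one that actually removed it.
func (s *state) evict(name string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.volumes[name]
	delete(s.volumes, name)
	return ok
}

// removeVolume evicts the volume from the state first and tears down its
// storage second, mirroring the reordering described in the commit message.
func removeVolume(s *state, volumeDir, name string) error {
	if !s.evict(name) {
		// A concurrent remover got here first; the volume is already gone
		// from the state, so there is nothing left for us to do.
		return nil
	}
	// Storage teardown runs after the state no longer references the volume,
	// so a racing removal cannot observe a half-removed volume in the state.
	return os.RemoveAll(filepath.Join(volumeDir, name))
}

func main() {
	dir, err := os.MkdirTemp("", "volumes")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	s := &state{volumes: map[string]string{"data": filepath.Join(dir, "data")}}
	if err := os.MkdirAll(s.volumes["data"], 0o700); err != nil {
		panic(err)
	}

	// Two concurrent removals of the same volume: one wins the eviction,
	// the other sees the volume gone and returns nil.
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("remove:", removeVolume(s, dir, "data"))
		}()
	}
	wg.Wait()
}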
@@ -1369,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
 		return ErrDBClosed
 	}
 
-	if !volume.valid {
-		return ErrVolumeRemoved
-	}
-
 	volName := []byte(volume.Name())
 
 	db, err := s.getDBCon()
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
 		return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
 	}
 
-	// Get the lock
-	lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
-	if err != nil {
-		return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
-	}
-	volume.lock = lock
-
 	volume.runtime = s.runtime
 	volume.valid = true
 
@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 
 		for _, v := range volumes {
 			if volume, err := runtime.state.Volume(v); err == nil {
-				if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+				if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
					logrus.Errorf("cleanup volume (%s): %v", v, err)
 				}
 			}
@@ -12,6 +12,12 @@ import (
 // It renders the runtime it is called on, and all container/pod/volume structs
 // from that runtime, unusable, and requires that a new runtime be initialized
 // after it is called.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podmans owning the
+// lock as read, renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
 func (r *Runtime) RenumberLocks() error {
 	r.lock.Lock()
 	locked := true
@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
 type VolumeFilter func(*Volume) bool
 
 // RemoveVolume removes a volumes
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
 		}
 	}
 
-	v.lock.Lock()
-	defer v.lock.Unlock()
-
-	return r.removeVolume(ctx, v, force, prune)
+	return r.removeVolume(ctx, v, force)
 }
 
 // RemoveVolumes removes a slice of volumes or all with a force bool
@@ -64,7 +61,7 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
 	}
 
 	for _, vol := range vols {
-		if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
+		if err := r.RemoveVolume(ctx, vol, force); err != nil {
 			return deletedVols, err
 		}
 		logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
 	}
 
 	for _, vol := range vols {
-		if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
-			if err != ErrVolumeBeingUsed {
+		if err := r.RemoveVolume(ctx, vol, false); err != nil {
+			if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
 				pruneErrors = append(pruneErrors, err)
 			}
 			continue
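A note on the errors.Cause change in the PruneVolumes hunk above: with the prune flag gone, the in-use error comes back wrapped with context via github.com/pkg/errors (see the removeVolume hunk further down), so a direct comparison against the sentinel no longer matches and the check has to unwrap first. A small stand-alone illustration; the ErrVolumeBeingUsed here is a local stand-in for libpod's sentinel of the same name.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Local stand-in for libpod's ErrVolumeBeingUsed sentinel.
var ErrVolumeBeingUsed = errors.New("volume is being used")

func main() {
	// The in-use case is now returned wrapped with context.
	wrapped := errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", "data", "abc123")

	// A direct comparison no longer matches the sentinel...
	fmt.Println(wrapped == ErrVolumeBeingUsed) // false

	// ...but errors.Cause unwraps back to it, which is what the prune loop checks.
	fmt.Println(errors.Cause(wrapped) == ErrVolumeBeingUsed) // true
}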
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}
 	volume.config.MountPoint = fullVolPath
 
-	lock, err := r.lockManager.AllocateLock()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error allocating lock for new volume")
-	}
-	volume.lock = lock
-	volume.config.LockID = volume.lock.ID()
-
 	volume.valid = true
 
 	// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 }
 
 // removeVolume removes the specified volume from state as well tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
 	if !v.valid {
-		return ErrNoSuchVolume
+		if ok, _ := r.state.HasVolume(v.Name()); !ok {
+			return nil
+		}
+		return ErrVolumeRemoved
 	}
 
 	deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
 		return err
 	}
 	if len(deps) != 0 {
-		if prune {
-			return ErrVolumeBeingUsed
-		}
 		depsStr := strings.Join(deps, ", ")
 		if !force {
 			return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
 		}
 	}
 
-	// Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
-	if err := v.teardownStorage(); err != nil {
-		return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
-	}
+	// Set volume as invalid so it can no longer be used
+	v.valid = false
 
 	// Remove the volume from the state
 	if err := r.state.RemoveVolume(v); err != nil {
 		return errors.Wrapf(err, "error removing volume %s", v.Name())
 	}
 
-	// Set volume as invalid so it can no longer be used
-	v.valid = false
+	// Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+	if err := v.teardownStorage(); err != nil {
+		return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+	}
+
+	logrus.Debugf("Removed volume %s", v.Name())
 
 	return nil
 }
@@ -1,7 +1,5 @@
 package libpod
 
-import "github.com/containers/libpod/libpod/lock"
-
 // Volume is the type used to create named volumes
 // TODO: all volumes should be created using this and the Volume API
 type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
 
 	valid   bool
 	runtime *Runtime
-	lock    lock.Locker
 }
 
 // VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
 type VolumeConfig struct {
 	// Name of the volume
 	Name string `json:"name"`
-	// ID of this volume's lock
-	LockID uint32 `json:"lockID"`
 
 	Labels map[string]string `json:"labels"`
 	MountPoint string `json:"mountPoint"`
@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
 
 // teardownStorage deletes the volume from volumePath
 func (v *Volume) teardownStorage() error {
-	if !v.valid {
-		return ErrNoSuchVolume
-	}
 	return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
 }