Remove runtime lockDir and add in-memory lock manager

Remove the runtime's lockDir, as it is no longer needed after the lock
rework. Add a trivial in-memory lock manager for unit testing.

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>

committed by Matthew Heon
parent d4b2f11601
commit 35361595f3
@@ -3,7 +3,6 @@ package libpod
 import (
 	"bytes"
 	"encoding/json"
-	"path/filepath"
 	"runtime"
 	"strings"
 
@@ -351,8 +350,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
 	}
 
 	// Get the lock
-	lockPath := filepath.Join(s.runtime.lockDir, string(name))
-	lock, err := storage.GetLockfile(lockPath)
+	lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
 	if err != nil {
 		return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
 	}
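
The lock package's Manager and Locker interfaces are not part of this diff. Judging from the call above (RetrieveLock by a stored numeric ID) and the in-memory implementation below, they presumably look something like this reconstruction:

package lock

// Manager hands out locks and can re-fetch an existing lock from its
// stable numeric ID. Reconstructed from usage in this commit, not
// copied verbatim from the repository.
type Manager interface {
	// AllocateLock returns an unused lock, marking it allocated.
	AllocateLock() (Locker, error)
	// RetrieveLock looks up an existing lock by the ID stored in
	// state (e.g. VolumeConfig.LockID below).
	RetrieveLock(id uint32) (Locker, error)
}

// Locker is the handle callers actually lock and unlock.
type Locker interface {
	// ID is the stable identifier persisted in the Bolt state.
	ID() uint32
	Lock()
	Unlock()
	// Free returns the lock to the manager's pool for reuse.
	Free() error
}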
libpod/lock/in_memory_locks.go (new file, 91 lines)
@@ -0,0 +1,91 @@
+package lock
+
+import (
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+// Mutex holds a single mutex and whether it has been allocated.
+type Mutex struct {
+	id        uint32
+	lock      sync.Mutex
+	allocated bool
+}
+
+// ID retrieves the ID of the mutex
+func (m *Mutex) ID() uint32 {
+	return m.id
+}
+
+// Lock locks the mutex
+func (m *Mutex) Lock() {
+	m.lock.Lock()
+}
+
+// Unlock unlocks the mutex
+func (m *Mutex) Unlock() {
+	m.lock.Unlock()
+}
+
+// Free deallocates the mutex to allow its reuse
+func (m *Mutex) Free() error {
+	m.allocated = false
+
+	return nil
+}
+
+// InMemoryManager is a lock manager that allocates and retrieves local-only
+// locks - that is, they are not multiprocess. This lock manager is intended
+// purely for unit and integration testing and should not be used in production
+// deployments.
+type InMemoryManager struct {
+	locks     []*Mutex
+	numLocks  uint32
+	localLock sync.Mutex
+}
+
+// NewInMemoryManager creates a new in-memory lock manager with the given number
+// of locks.
+func NewInMemoryManager(numLocks uint32) (Manager, error) {
+	if numLocks == 0 {
+		return nil, errors.Errorf("must provide a non-zero number of locks!")
+	}
+
+	manager := new(InMemoryManager)
+	manager.numLocks = numLocks
+	manager.locks = make([]*Mutex, numLocks)
+
+	var i uint32
+	for i = 0; i < numLocks; i++ {
+		lock := new(Mutex)
+		lock.id = i
+		manager.locks[i] = lock
+	}
+
+	return manager, nil
+}
+
+// AllocateLock allocates a lock from the manager.
+func (m *InMemoryManager) AllocateLock() (Locker, error) {
+	m.localLock.Lock()
+	defer m.localLock.Unlock()
+
+	for _, lock := range m.locks {
+		if !lock.allocated {
+			lock.allocated = true
+			return lock, nil
+		}
+	}
+
+	return nil, errors.Errorf("all locks have been allocated")
+}
+
+// RetrieveLock retrieves a lock from the manager.
+func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
+	if id >= m.numLocks {
+		return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+	}
+
+	return m.locks[id], nil
+}
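
A minimal sketch of how this in-memory manager might be exercised in a unit test; the test name and lock count are illustrative, not taken from the repository:

package lock

import "testing"

// Hypothetical round-trip test: allocate a lock, use it, re-fetch it
// by ID, and free it.
func TestInMemoryManagerRoundTrip(t *testing.T) {
	manager, err := NewInMemoryManager(32)
	if err != nil {
		t.Fatal(err)
	}

	l, err := manager.AllocateLock()
	if err != nil {
		t.Fatal(err)
	}

	// Within a single process the lock behaves like a plain mutex.
	l.Lock()
	l.Unlock()

	// IDs are stable, so the same lock can be re-fetched later - this
	// mirrors what BoltState does with the LockID stored in the
	// volume config.
	l2, err := manager.RetrieveLock(l.ID())
	if err != nil {
		t.Fatal(err)
	}
	if l2.ID() != l.ID() {
		t.Fatalf("expected lock %d, got %d", l.ID(), l2.ID())
	}

	// Free marks the slot reusable for future AllocateLock calls.
	if err := l.Free(); err != nil {
		t.Fatal(err)
	}
}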
@@ -12,6 +12,8 @@ type SHMLockManager struct
 }
 
 // NewSHMLockManager makes a new SHMLockManager with the given number of locks.
+// Due to the underlying implementation, the exact number of locks created may
+// be greater than the number given here.
 func NewSHMLockManager(path string, numLocks uint32) (Manager, error) {
 	locks, err := shm.CreateSHMLock(path, numLocks)
 	if err != nil {
@@ -13,7 +13,7 @@ import (
 )
 
 // Creates a new, empty pod
-func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
+func newPod(runtime *Runtime) (*Pod, error) {
 	pod := new(Pod)
 	pod.config = new(PodConfig)
 	pod.config.ID = stringid.GenerateNonCryptoID()
@@ -84,7 +84,6 @@ type Runtime struct {
 	storageService *storageService
 	imageContext   *types.SystemContext
 	ociRuntime     *OCIRuntime
-	lockDir        string
 	netPlugin      ocicni.CNIPlugin
 	ociRuntimePath string
 	conmonPath     string
@@ -679,17 +678,6 @@ func makeRuntime(runtime *Runtime) (err error) {
 	}
 	runtime.ociRuntime = ociRuntime
 
-	// Make a directory to hold container lockfiles
-	lockDir := filepath.Join(runtime.config.TmpDir, "lock")
-	if err := os.MkdirAll(lockDir, 0755); err != nil {
-		// The directory is allowed to exist
-		if !os.IsExist(err) {
-			return errors.Wrapf(err, "error creating runtime lockfiles directory %s",
-				lockDir)
-		}
-	}
-	runtime.lockDir = lockDir
-
 	// Make the per-boot files directory if it does not exist
 	if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
 		// The directory is allowed to exist
@@ -732,6 +720,7 @@ func makeRuntime(runtime *Runtime) (err error) {
 		if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
 			return err2
 		}
+
 	}
 }
 
@@ -23,7 +23,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 		return nil, ErrRuntimeStopped
 	}
 
-	pod, err := newPod(r.lockDir, r)
+	pod, err := newPod(r)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error creating pod")
 	}
@@ -8,7 +8,6 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
@@ -68,14 +67,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	}
 	volume.config.MountPoint = fullVolPath
 
-	// Path our lock file will reside at
-	lockPath := filepath.Join(r.lockDir, volume.config.Name)
-	// Grab a lockfile at the given path
-	lock, err := storage.GetLockfile(lockPath)
+	lock, err := r.lockManager.AllocateLock()
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating lockfile for new volume")
+		return nil, errors.Wrapf(err, "error allocating lock for new volume")
 	}
 	volume.lock = lock
+	volume.config.LockID = volume.lock.ID()
 
 	volume.valid = true
 
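
Taken together with the getVolumeFromDB hunk above, this completes the new lifecycle: a lock is allocated from the manager when the volume is created, its numeric ID is persisted in the volume's config, and the same lock is re-fetched by that ID when the volume is loaded back from the Bolt state. The durable handle is now an ID rather than a filesystem path, which is what allows the lockfile directory (and runtime.lockDir) to go away and lets the backend be swapped: SHM locks in production, the in-memory manager in tests.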
@@ -1,6 +1,6 @@
 package libpod
 
-import "github.com/containers/storage"
+import "github.com/containers/libpod/libpod/lock"
 
 // Volume is the type used to create named volumes
 // TODO: all volumes should be created using this and the Volume API
@@ -9,13 +9,17 @@ type Volume struct {
 
 	valid   bool
 	runtime *Runtime
-	lock    storage.Locker
+	lock    lock.Locker
 }
 
 // VolumeConfig holds the volume's config information
 //easyjson:json
 type VolumeConfig struct {
-	Name       string            `json:"name"`
+	// Name of the volume
+	Name string `json:"name"`
+	// ID of this volume's lock
+	LockID uint32 `json:"lockID"`
+
 	Labels     map[string]string `json:"labels"`
 	MountPoint string            `json:"mountPoint"`
 	Driver     string            `json:"driver"`
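
Since VolumeConfig is what gets persisted in the Bolt state, the new LockID field carries a JSON tag. A hypothetical illustration of the round trip using plain encoding/json (the real struct is easyjson-generated and lives in libpod):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed, hypothetical copy of VolumeConfig for illustration only.
type VolumeConfig struct {
	// Name of the volume
	Name string `json:"name"`
	// ID of this volume's lock
	LockID uint32 `json:"lockID"`
}

func main() {
	cfg := VolumeConfig{Name: "myvol", LockID: 3}
	buf, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // prints {"name":"myvol","lockID":3}
}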