Add initial version of renumber backend

Renumber reassigns container lock numbers after the number of
locks available has changed.

For now, renumber only works with containers.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
Matthew Heon
2019-02-14 17:25:58 -05:00
parent 84feff2e06
commit 7fdd20ae5a
9 changed files with 150 additions and 7 deletions
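
In outline: the lock Manager interface grows a FreeAllLocks method that drops every allocation at once, and the runtime gains RenumberLocks, which frees everything and then walks the state, allocating a fresh lock for each container and writing the new lock ID back into its config. A minimal sketch of that flow with stand-in types (the sketch package, Config, and renumber are illustrative, not code from this commit):

```go
package sketch

// Stand-ins for the lock interfaces this commit extends.
type Locker interface{ ID() uint32 }

type Manager interface {
    AllocateLock() (Locker, error)
    FreeAllLocks() error
}

// Config mirrors the persisted per-container LockID field.
type Config struct{ LockID uint32 }

// renumber frees every lock, then hands out fresh ones in order and
// records the new IDs so they can be written back to the state.
func renumber(m Manager, cfgs []*Config) error {
    if err := m.FreeAllLocks(); err != nil {
        return err
    }
    for _, cfg := range cfgs {
        l, err := m.AllocateLock()
        if err != nil {
            return err
        }
        cfg.LockID = l.ID()
    }
    return nil
}
```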

View File

@@ -89,3 +89,12 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
 
     return m.locks[id], nil
 }
+
+// FreeAllLocks frees all locks
+func (m *InMemoryManager) FreeAllLocks() error {
+    for _, lock := range m.locks {
+        lock.allocated = false
+    }
+
+    return nil
+}

View File

@@ -24,6 +24,9 @@ type Manager interface {
     // The underlying lock MUST be the same as any other lock with the
     // same UUID.
     RetrieveLock(id uint32) (Locker, error)
+    // FreeAllLocks frees all allocated locks, in preparation for lock
+    // reallocation.
+    FreeAllLocks() error
 }
 
 // Locker is similar to sync.Locker, but provides a method for freeing the lock
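
FreeAllLocks slots into the existing allocate/retrieve lifecycle: once it runs, every ID is available again and may be handed to a new owner. A sketch of that lifecycle against the in-memory manager above (the import path and constructor signature are assumptions based on the repo layout at the time):

```go
package sketch

import "github.com/containers/libpod/libpod/lock"

// demoLifecycle is illustrative only.
func demoLifecycle() error {
    mgr, err := lock.NewInMemoryManager(2048) // assumed constructor, illustrative capacity
    if err != nil {
        return err
    }

    l1, err := mgr.AllocateLock()
    if err != nil {
        return err
    }

    // RetrieveLock is keyed by ID and must return the same underlying lock.
    if _, err := mgr.RetrieveLock(l1.ID()); err != nil {
        return err
    }

    // Drop every allocation at once; previously handed-out IDs may now
    // be reassigned to new owners.
    if err := mgr.FreeAllLocks(); err != nil {
        return err
    }

    // Allocation restarts from the beginning of the lock table.
    _, err = mgr.AllocateLock()
    return err
}
```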

View File

@@ -407,6 +407,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   return 0;
 }
+
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+  int ret_code;
+  uint i;
+
+  if (shm == NULL) {
+    return -1 * EINVAL;
+  }
+
+  // Lock the mutex controlling access to our shared memory
+  ret_code = take_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  // Iterate through all bitmaps and reset to unused
+  for (i = 0; i < shm->num_bitmaps; i++) {
+    shm->locks[i].bitmap = 0;
+  }
+
+  // Unlock the allocation control mutex
+  ret_code = release_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  return 0;
+}
 
 // Lock a given semaphore
 // Does not check if the semaphore is allocated - this ensures that, even for
 // removed containers, we can still successfully lock to check status (and
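
The shared-memory segment tracks allocation as one bit per semaphore, grouped into fixed-size bitmaps, so zeroing every bitmap is enough to free all semaphores without touching the semaphores themselves. A Go sketch of that bookkeeping, assuming the 32-locks-per-bitmap layout this file uses:

```go
package sketch

const bitmapSize = 32 // semaphores tracked per bitmap (assumed, per BITMAP_SIZE)

// locate maps a semaphore index to its bitmap and bit position.
func locate(semIndex uint32) (group, bit uint32) {
    return semIndex / bitmapSize, semIndex % bitmapSize
}

// allocated reports whether a semaphore is currently marked in use.
func allocated(bitmaps []uint32, semIndex uint32) bool {
    group, bit := locate(semIndex)
    return bitmaps[group]&(1<<bit) != 0
}

// freeAll mirrors the loop in deallocate_all_semaphores: clearing each
// bitmap marks all of its semaphores unallocated in one pass.
func freeAll(bitmaps []uint32) {
    for i := range bitmaps {
        bitmaps[i] = 0
    }
}
```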

View File

@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
     return nil
 }
+
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+    if !locks.valid {
+        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+    }
+
+    retCode := C.deallocate_all_semaphores(locks.lockStruct)
+    if retCode < 0 {
+        // Negative errno return from C
+        return syscall.Errno(-1 * retCode)
+    }
+
+    return nil
+}
 
 // LockSemaphore locks the given semaphore.
 // If the semaphore is already locked, LockSemaphore will block until the lock
 // can be acquired.
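
The wrapper follows this package's convention for crossing the cgo boundary: C entry points return zero (or a positive value) on success and a negated errno on failure, and the Go side flips the sign and converts to syscall.Errno, which satisfies the error interface. The conversion pattern in isolation (toErr is illustrative, not a function in this diff):

```go
package sketch

import "syscall"

// toErr converts a C-style negative-errno return into a Go error.
// Callers can compare the result against syscall.EINVAL, syscall.ENOENT, etc.
func toErr(retCode int64) error {
    if retCode < 0 {
        return syscall.Errno(-retCode) // e.g. -22 becomes EINVAL on Linux
    }
    return nil
}
```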

View File

@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
 int32_t close_lock_shm(shm_struct_t *shm);
 int64_t allocate_semaphore(shm_struct_t *shm);
 int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
 int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
 int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);

View File

@@ -4,7 +4,6 @@ import (
     "fmt"
     "os"
     "runtime"
-    "syscall"
     "testing"
     "time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
     }
     defer func() {
         // Deallocate all locks
-        // Ignore ENOENT (lock is not allocated)
-        var i uint32
-        for i = 0; i < numLocks; i++ {
-            if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
-                t.Fatalf("Error deallocating semaphore %d: %v", i, err)
-            }
+        if err := locks.DeallocateAllSemaphores(); err != nil {
+            t.Fatalf("Error deallocating semaphores: %v", err)
         }
 
         if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
     })
 }
 
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+    runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+        // Allocate a lock
+        locks1, err := locks.AllocateSemaphore()
+        assert.NoError(t, err)
+
+        // Free all locks
+        err = locks.DeallocateAllSemaphores()
+        assert.NoError(t, err)
+
+        // Allocate another lock
+        locks2, err := locks.AllocateSemaphore()
+        assert.NoError(t, err)
+
+        assert.Equal(t, locks1, locks2)
+    })
+}
+
 // Test that locks actually lock
 func TestLockSemaphoreActuallyLocks(t *testing.T) {
     runLockTest(t, func(t *testing.T, locks *SHMLocks) {
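
The final assertion leans on first-fit allocation: after DeallocateAllSemaphores, the next AllocateSemaphore hands out the lowest free index again, so locks1 and locks2 must be equal. Stated as a property, as it might be written inside this package (illustrative, not part of the test file):

```go
// firstIDRepeats should hold for any first-fit allocator: freeing
// everything makes the next allocation return the same index as before.
func firstIDRepeats(locks *SHMLocks) (bool, error) {
    before, err := locks.AllocateSemaphore()
    if err != nil {
        return false, err
    }
    if err := locks.DeallocateAllSemaphores(); err != nil {
        return false, err
    }
    after, err := locks.AllocateSemaphore()
    return before == after, err
}
```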

View File

@@ -71,6 +71,11 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
     return lock, nil
 }
 
+// FreeAllLocks frees all locks in the manager
+func (m *SHMLockManager) FreeAllLocks() error {
+    return m.locks.DeallocateAllSemaphores()
+}
+
 // SHMLock is an individual shared memory lock.
 type SHMLock struct {
     lockID uint32

View File

 func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
     return nil, fmt.Errorf("not supported")
 }
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+    return fmt.Errorf("not supported")
+}

View File

@@ -0,0 +1,60 @@
+package libpod
+
+import (
+    "path/filepath"
+
+    "github.com/containers/storage"
+    "github.com/pkg/errors"
+)
+
+// RenumberLocks reassigns lock numbers for all containers, pods, and volumes in
+// the state.
+// It renders the runtime it is called on, and all container/pod/volume structs
+// from that runtime, unusable, and requires that a new runtime be initialized
+// after it is called.
+func (r *Runtime) RenumberLocks() error {
+    r.lock.Lock()
+    locked := true
+    defer func() {
+        if locked {
+            r.lock.Unlock()
+        }
+    }()
+
+    runtimeAliveLock := filepath.Join(r.config.TmpDir, "alive.lck")
+    aliveLock, err := storage.GetLockfile(runtimeAliveLock)
+    if err != nil {
+        return errors.Wrapf(err, "error acquiring runtime init lock")
+    }
+    aliveLock.Lock()
+    // It's OK to defer until Shutdown() has run, so no need to check locked
+    defer aliveLock.Unlock()
+
+    // Start off by deallocating all locks
+    if err := r.lockManager.FreeAllLocks(); err != nil {
+        return err
+    }
+
+    allCtrs, err := r.state.AllContainers()
+    if err != nil {
+        return err
+    }
+    for _, ctr := range allCtrs {
+        lock, err := r.lockManager.AllocateLock()
+        if err != nil {
+            return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+        }
+
+        ctr.config.LockID = lock.ID()
+
+        // Write the new lock ID
+        if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+            return err
+        }
+    }
+
+    r.lock.Unlock()
+    locked = false
+
+    return r.Shutdown(false)
+}
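
Because RenumberLocks ends by shutting the runtime down, renumbering is a terminal operation: the caller must discard the old runtime and every object obtained from it, then construct a fresh one. A hedged sketch of that contract, as it might look inside this package (NewRuntime stands in for whatever constructor the caller normally uses):

```go
// renumberAndReopen is illustrative, not code from this commit.
func renumberAndReopen(old *Runtime) (*Runtime, error) {
    if err := old.RenumberLocks(); err != nil {
        return nil, err
    }
    // old, and all containers/pods retrieved from it, are now unusable.
    return NewRuntime() // assumed re-initialization path
}
```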