Move to POSIX mutexes for SHM locks

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Author:       Matthew Heon <matthew.heon@gmail.com>
Date:         2018-08-10 13:46:07 -04:00
Committed by: Matthew Heon
Parent:       f38fccb48c
Commit:       e73484c176
6 changed files with 271 additions and 171 deletions
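The core of the change is a switch from unnamed POSIX semaphores (sem_init/sem_wait) to pthread mutexes that are both process-shared (usable by any process that maps the SHM segment) and robust (a mutex held by a process that dies can be recovered instead of wedging the segment). As a quick orientation, here is a minimal standalone sketch of that pattern. It is not libpod code, only the POSIX calls the new take_mutex() helper in the diff below relies on, using an anonymous shared mapping in place of a named SHM segment:

    // Sketch: robust, process-shared pthread mutex in shared memory.
    // Build with: gcc -pthread robust_demo.c
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void) {
        // Anonymous shared mapping stands in for the named SHM segment.
        pthread_mutex_t *mtx = mmap(NULL, sizeof(*mtx), PROT_READ | PROT_WRITE,
                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (mtx == MAP_FAILED) { perror("mmap"); return 1; }

        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init(mtx, &attr);
        pthread_mutexattr_destroy(&attr);

        pid_t pid = fork();
        if (pid == 0) {
            pthread_mutex_lock(mtx);
            _exit(0); // child dies while holding the lock
        }
        waitpid(pid, NULL, 0);

        int rc = pthread_mutex_lock(mtx);
        if (rc == EOWNERDEAD) {
            // Previous owner died; repair the mutex state and carry on.
            pthread_mutex_consistent(mtx);
            printf("recovered mutex from dead owner\n");
        }
        pthread_mutex_unlock(mtx);
        return 0;
    }

The EOWNERDEAD branch is exactly the condition the commit's take_mutex() handles with pthread_mutex_consistent().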

View File

@@ -23,7 +23,7 @@ type Manager interface {
 	// RetrieveLock retrieves a lock given its UUID.
 	// The underlying lock MUST be the same as another other lock with the
 	// same UUID.
-	RetrieveLock(id string) (Locker, error)
+	RetrieveLock(id uint32) (Locker, error)
 }
 
 // Locker is similar to sync.Locker, but provides a method for freeing the lock
@@ -37,7 +37,7 @@ type Locker interface {
 	// ID is guaranteed to uniquely identify the lock within the
 	// Manager - that is, calling RetrieveLock with this ID will return
 	// another instance of the same lock.
-	ID() string
+	ID() uint32
 	// Lock locks the lock.
 	// This call MUST block until it successfully acquires the lock or
 	// encounters a fatal error.
@@ -46,8 +46,8 @@ type Locker interface {
 	// A call to Unlock() on a lock that is already unlocked lock MUST
 	// error.
 	Unlock() error
-	// Deallocate deallocates the underlying lock, allowing its reuse by
-	// other pods and containers.
+	// Free deallocates the underlying lock, allowing its reuse by other
+	// pods and containers.
 	// The lock MUST still be usable after a Free() - some libpod instances
 	// may still retain Container structs with the old lock. This simply
 	// advises the manager that the lock may be reallocated.

View File

@@ -1,6 +1,6 @@
 #include <errno.h>
 #include <fcntl.h>
-#include <semaphore.h>
+#include <pthread.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -12,19 +12,67 @@
 #include "shm_lock.h"
 
 // Compute the size of the SHM struct
-size_t compute_shm_size(uint32_t num_bitmaps) {
+static size_t compute_shm_size(uint32_t num_bitmaps) {
   return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
 }
 
+// Take the given mutex.
+// Handles exceptional conditions, including a mutex locked by a process that
+// died holding it.
+// Returns 0 on success, or positive errno on failure.
+static int take_mutex(pthread_mutex_t *mutex) {
+  int ret_code;
+
+  do {
+    ret_code = pthread_mutex_lock(mutex);
+  } while(ret_code == EAGAIN);
+
+  if (ret_code == EOWNERDEAD) {
+    // The previous owner of the mutex died while holding it
+    // Take it for ourselves
+    ret_code = pthread_mutex_consistent(mutex);
+    if (ret_code != 0) {
+      // Someone else may have gotten here first and marked the state consistent
+      // However, the mutex could also be invalid.
+      // Fail here instead of looping back to trying to lock the mutex.
+      return ret_code;
+    }
+  } else if (ret_code != 0) {
+    return ret_code;
+  }
+
+  return 0;
+}
+
+// Release the given mutex.
+// Returns 0 on success, or positive errno on failure.
+static int release_mutex(pthread_mutex_t *mutex) {
+  int ret_code;
+
+  do {
+    ret_code = pthread_mutex_unlock(mutex);
+  } while(ret_code == EAGAIN);
+
+  if (ret_code != 0) {
+    return ret_code;
+  }
+
+  return 0;
+}
+
 // Set up an SHM segment holding locks for libpod.
-// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// num_locks must not be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// container no other / characters, and be at most 255 characters including
+// terminating NULL byte.
 // Returns a valid pointer on success or NULL on error.
-// If an error occurs, it will be written to the int pointed to by error_code.
-shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
+// If an error occurs, negative ERRNO values will be written to error_code.
+shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
   int shm_fd, i, j, ret_code;
   uint32_t num_bitmaps;
   size_t shm_size;
   shm_struct_t *shm;
+  pthread_mutexattr_t attr;
 
   // If error_code doesn't point to anything, we can't reasonably return errors
   // So fail immediately
@@ -34,67 +82,93 @@ shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
   // We need a nonzero number of locks
   if (num_locks == 0) {
-    *error_code = EINVAL;
+    *error_code = -1 * EINVAL;
+    return NULL;
+  }
+
+  if (path == NULL) {
+    *error_code = -1 * EINVAL;
     return NULL;
   }
 
   // Calculate the number of bitmaps required
-  if (num_locks % BITMAP_SIZE != 0) {
-    // Number of locks not a multiple of BITMAP_SIZE
-    *error_code = EINVAL;
-    return NULL;
-  }
-
   num_bitmaps = num_locks / BITMAP_SIZE;
+  if (num_locks % BITMAP_SIZE != 0) {
+    // The actual number given is not an even multiple of our bitmap size
+    // So round up
+    num_bitmaps += 1;
+  }
 
   // Calculate size of the shm segment
   shm_size = compute_shm_size(num_bitmaps);
 
   // Create a new SHM segment for us
-  shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600);
+  shm_fd = shm_open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
   if (shm_fd < 0) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     return NULL;
   }
 
   // Increase its size to what we need
   ret_code = ftruncate(shm_fd, shm_size);
   if (ret_code < 0) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     goto CLEANUP_UNLINK;
   }
 
   // Map the shared memory in
   shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
   if (shm == MAP_FAILED) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     goto CLEANUP_UNLINK;
   }
 
   // We have successfully mapped the memory, now initialize the region
   shm->magic = MAGIC;
-  shm->num_locks = num_locks;
+  shm->unused = 0;
+  shm->num_locks = num_bitmaps * BITMAP_SIZE;
   shm->num_bitmaps = num_bitmaps;
 
-  // Initialize the semaphore that protects the bitmaps.
-  // Initialize to value 1, as we're a mutex, and set pshared as this will be
-  // shared between processes in an SHM.
-  ret_code = sem_init(&(shm->segment_lock), true, 1);
-  if (ret_code < 0) {
-    *error_code = errno;
+  // Create an initializer for our pthread mutexes
+  ret_code = pthread_mutexattr_init(&attr);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
     goto CLEANUP_UNMAP;
   }
 
+  // Set mutexes to pshared - multiprocess-safe
+  ret_code = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
+  // Set mutexes to robust - if a process dies while holding a mutex, we'll get
+  // a special error code on the next attempt to lock it.
+  // This should prevent panicing processes from leaving the state unusable.
+  ret_code = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
+  // Initialize the mutex that protects the bitmaps using the mutex attributes
+  ret_code = pthread_mutex_init(&(shm->segment_lock), &attr);
+  if (ret_code != 0) {
+    *error_code = -1 * ret_code;
+    goto CLEANUP_FREEATTR;
+  }
+
   // Initialize all bitmaps to 0 initially
   // And initialize all semaphores they use
   for (i = 0; i < num_bitmaps; i++) {
     shm->locks[i].bitmap = 0;
     for (j = 0; j < BITMAP_SIZE; j++) {
-      // As above, initialize to 1 to act as a mutex, and set pshared as we'll
-      // be living in an SHM.
-      ret_code = sem_init(&(shm->locks[i].locks[j]), true, 1);
-      if (ret_code < 0) {
-        *error_code = errno;
-        goto CLEANUP_UNMAP;
+      // Initialize each mutex
+      ret_code = pthread_mutex_init(&(shm->locks[i].locks[j]), &attr);
+      if (ret_code != 0) {
+        *error_code = -1 * ret_code;
+        goto CLEANUP_FREEATTR;
       }
     }
   }
@@ -103,23 +177,33 @@ shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code) {
   // Ignore errors, it's ok if we leak a single FD and this should only run once
   close(shm_fd);
 
+  // Destroy the pthread initializer attribute.
+  // Again, ignore errors, this will only run once and we might leak a tiny bit
+  // of memory at worst.
+  pthread_mutexattr_destroy(&attr);
+
   return shm;
 
   // Cleanup after an error
+CLEANUP_FREEATTR:
+  pthread_mutexattr_destroy(&attr);
 CLEANUP_UNMAP:
   munmap(shm, shm_size);
 CLEANUP_UNLINK:
   close(shm_fd);
-  shm_unlink(SHM_NAME);
+  shm_unlink(path);
   return NULL;
 }
 
 // Open an existing SHM segment holding libpod locks.
 // num_locks is the number of locks that will be configured in the SHM segment.
-// num_locks must be a multiple of BITMAP_SIZE (32 by default).
+// num_locks cannot be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// container no other / characters, and be at most 255 characters including
+// terminating NULL byte.
 // Returns a valid pointer on success or NULL on error.
-// If an error occurs, it will be written to the int pointed to by error_code.
-shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) {
+// If an error occurs, negative ERRNO values will be written to error_code.
+shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
   int shm_fd;
   shm_struct_t *shm;
   size_t shm_size;
@@ -131,30 +215,34 @@ shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) {
   // We need a nonzero number of locks
   if (num_locks == 0) {
-    *error_code = EINVAL;
+    *error_code = -1 * EINVAL;
+    return NULL;
+  }
+
+  if (path == NULL) {
+    *error_code = -1 * EINVAL;
     return NULL;
   }
 
   // Calculate the number of bitmaps required
-  if (num_locks % BITMAP_SIZE != 0) {
-    // Number of locks not a multiple of BITMAP_SIZE
-    *error_code = EINVAL;
-    return NULL;
-  }
-
   num_bitmaps = num_locks / BITMAP_SIZE;
+  if (num_locks % BITMAP_SIZE != 0) {
+    num_bitmaps += 1;
+  }
 
   // Calculate size of the shm segment
   shm_size = compute_shm_size(num_bitmaps);
 
-  shm_fd = shm_open(SHM_NAME, O_RDWR, 0600);
+  shm_fd = shm_open(path, O_RDWR, 0600);
   if (shm_fd < 0) {
+    *error_code = -1 * errno;
     return NULL;
   }
 
   // Map the shared memory in
   shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
   if (shm == MAP_FAILED) {
-    *error_code = errno;
+    *error_code = -1 * errno;
   }
 
   // Ignore errors, it's ok if we leak a single FD since this only runs once
@@ -167,11 +255,11 @@ shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code) {
   // Need to check the SHM to see if it's actually our locks
   if (shm->magic != MAGIC) {
-    *error_code = errno;
+    *error_code = -1 * errno;
     goto CLEANUP;
   }
 
-  if (shm->num_locks != num_locks) {
-    *error_code = errno;
+  if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
+    *error_code = -1 * errno;
     goto CLEANUP;
   }
@@ -219,11 +307,9 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
   }
 
   // Lock the semaphore controlling access to our shared memory
-  do {
-    ret_code = sem_wait(&(shm->segment_lock));
-  } while(ret_code == EINTR);
+  ret_code = take_mutex(&(shm->segment_lock));
   if (ret_code != 0) {
-    return -1 * errno;
+    return -1 * ret_code;
   }
 
   // Loop through our bitmaps to search for one that is not full
@@ -237,8 +323,13 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
       sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
       // OR in the bitmap
       shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;
-      // Clear the semaphore
-      sem_post(&(shm->segment_lock));
+
+      // Clear the mutex
+      ret_code = release_mutex(&(shm->segment_lock));
+      if (ret_code != 0) {
+        return -1 * ret_code;
+      }
 
       // Return the semaphore we've allocated
       return sem_number;
     }
@@ -250,8 +341,11 @@ int64_t allocate_semaphore(shm_struct_t *shm) {
     }
   }
 
-  // Post to the semaphore to clear the lock
-  sem_post(&(shm->segment_lock));
+  // Clear the mutex
+  ret_code = release_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
 
   // All bitmaps are full
   // We have no available semaphores, report allocation failure
@@ -282,23 +376,20 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
     return -1 * EFAULT;
   }
 
-  test_map = 0x1;
-  for (i = 0; i < index_in_bitmap; i++) {
-    test_map = test_map << 1;
-  }
+  test_map = 0x1 << index_in_bitmap;
 
-  // Lock the semaphore controlling access to our shared memory
-  do {
-    ret_code = sem_wait(&(shm->segment_lock));
-  } while(ret_code == EINTR);
+  // Lock the mutex controlling access to our shared memory
+  ret_code = take_mutex(&(shm->segment_lock));
   if (ret_code != 0) {
-    return -1 * errno;
+    return -1 * ret_code;
   }
 
   // Check if the semaphore is allocated
   if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
-    // Post to the semaphore to clear the lock
-    sem_post(&(shm->segment_lock));
+    ret_code = release_mutex(&(shm->segment_lock));
+    if (ret_code != 0) {
+      return -1 * ret_code;
+    }
+
     return -1 * ENOENT;
   }
@@ -308,8 +399,10 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   test_map = ~test_map;
   shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;
 
-  // Post to the semaphore to clear the lock
-  sem_post(&(shm->segment_lock));
+  ret_code = release_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
 
   return 0;
 }
@@ -333,15 +426,7 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   bitmap_index = sem_index / BITMAP_SIZE;
   index_in_bitmap = sem_index % BITMAP_SIZE;
 
-  // Lock the semaphore controlling access to our shared memory
-  do {
-    ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
-  } while(ret_code == EINTR);
-  if (ret_code != 0) {
-    return -1 * errno;
-  }
-
-  return 0;
+  return -1 * take_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
 }
 
 // Unlock a given semaphore
@@ -351,7 +436,6 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
 // Returns 0 on success, -1 on failure
 int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   int bitmap_index, index_in_bitmap, ret_code;
-  unsigned int sem_value = 0;
 
   if (shm == NULL) {
     return -1 * EINVAL;
@@ -364,20 +448,5 @@ int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   bitmap_index = sem_index / BITMAP_SIZE;
   index_in_bitmap = sem_index % BITMAP_SIZE;
 
-  // Only allow a post if the semaphore is less than 1 (locked)
-  // This allows us to preserve mutex behavior
-  ret_code = sem_getvalue(&(shm->locks[bitmap_index].locks[index_in_bitmap]), &sem_value);
-  if (ret_code != 0) {
-    return -1 * errno;
-  }
-
-  if (sem_value >= 1) {
-    return -1 * EBUSY;
-  }
-
-  ret_code = sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
-  if (ret_code != 0) {
-    return -1 * errno;
-  }
-
-  return 0;
+  return -1 * release_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
 }
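A side note on the bitmap bookkeeping touched above: deallocate_semaphore now builds its bit mask with a single shift instead of the old one-bit-at-a-time loop. The arithmetic that maps a lock index onto a (bitmap, bit) pair is easy to lose in the diff, so here is an illustrative standalone sketch - not libpod code, with BITMAP_SIZE fixed at 32, the default from the header:

    #include <stdint.h>
    #include <stdio.h>

    #define BITMAP_SIZE 32

    int main(void) {
        uint32_t sem_index = 70;                            // lock number 70
        uint32_t bitmap_index = sem_index / BITMAP_SIZE;    // lives in bitmap 2
        uint32_t index_in_bitmap = sem_index % BITMAP_SIZE; // bit 6 of that bitmap
        uint32_t test_map = 0x1 << index_in_bitmap;         // one shift replaces the old loop

        uint32_t bitmap = 0;
        bitmap |= test_map;   // mark lock 70 as allocated
        bitmap &= ~test_map;  // and free it again

        printf("bitmap %u, bit %u, mask 0x%08x, final bitmap 0x%08x\n",
               bitmap_index, index_in_bitmap, test_map, bitmap);
        return 0;
    }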

View File

@ -1,47 +1,54 @@
package shm package shm
// #cgo LDFLAGS: -lrt -lpthread // #cgo LDFLAGS: -lrt -lpthread
// #include <stdlib.h>
// #include "shm_lock.h" // #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE; // const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C" import "C"
import ( import (
"runtime"
"syscall" "syscall"
"unsafe"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var ( const (
bitmapSize uint32 = uint32(C.bitmap_size_c) BitmapSize uint32 = uint32(C.bitmap_size_c)
) )
// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory // SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment. // segment.
type SHMLocks struct { // nolint type SHMLocks struct { // nolint
lockStruct *C.shm_struct_t lockStruct *C.shm_struct_t
valid bool
maxLocks uint32 maxLocks uint32
valid bool
} }
// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX // CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks. // semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must be a multiple of the lock bitmap size (by default, 32). // numLocks must not be 0, and may be rounded up to a multiple of the bitmap
func CreateSHMLock(numLocks uint32) (*SHMLocks, error) { // size used by the underlying implementation.
if numLocks%bitmapSize != 0 || numLocks == 0 { func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c) if numLocks == 0 {
return nil, errors.Wrapf(syscall.EINVAL, "number of locks must greater than 0 0")
} }
locks := new(SHMLocks) locks := new(SHMLocks)
cPath := C.CString(path)
defer C.free(unsafe.Pointer(cPath))
var errCode C.int var errCode C.int
lockStruct := C.setup_lock_shm(C.uint32_t(numLocks), &errCode) lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
if lockStruct == nil { if lockStruct == nil {
// We got a null pointer, so something errored // We got a null pointer, so something errored
return nil, syscall.Errno(-1 * errCode) return nil, syscall.Errno(-1 * errCode)
} }
locks.lockStruct = lockStruct locks.lockStruct = lockStruct
locks.maxLocks = numLocks locks.maxLocks = uint32(lockStruct.num_locks)
locks.valid = true locks.valid = true
return locks, nil return locks, nil
@@ -49,17 +56,19 @@ func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
 // OpenSHMLock opens an existing shared-memory segment holding a given number of
 // POSIX semaphores. numLocks must match the number of locks the shared memory
-// segment was created with and be a multiple of the lock bitmap size (default
-// 32).
-func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
-	if numLocks%bitmapSize != 0 || numLocks == 0 {
-		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+	if numLocks == 0 {
+		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must greater than 0")
 	}
 
 	locks := new(SHMLocks)
 
+	cPath := C.CString(path)
+	defer C.free(unsafe.Pointer(cPath))
+
 	var errCode C.int
-	lockStruct := C.open_lock_shm(C.uint32_t(numLocks), &errCode)
+	lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
 	if lockStruct == nil {
 		// We got a null pointer, so something errored
 		return nil, syscall.Errno(-1 * errCode)
@@ -108,6 +117,8 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
 		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
 	}
 
+	// This returns a U64, so we have the full u32 range available for
+	// semaphore indexes, and can still return error codes.
 	retCode := C.allocate_semaphore(locks.lockStruct)
 	if retCode < 0 {
 		// Negative errno returned
@@ -154,6 +165,10 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
 		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
 	}
 
+	// For pthread mutexes, we have to guarantee lock and unlock happen in
+	// the same thread.
+	runtime.LockOSThread()
+
 	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
 	if retCode < 0 {
 		// Negative errno returned
@@ -184,5 +199,12 @@ func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
 		return syscall.Errno(-1 * retCode)
 	}
 
+	// For pthread mutexes, we have to guarantee lock and unlock happen in
+	// the same thread.
+	// OK if we take multiple locks - UnlockOSThread() won't actually unlock
+	// until the number of calls equals the number of calls to
+	// LockOSThread()
+	runtime.UnlockOSThread()
+
 	return nil
 }

View File

@@ -1,14 +1,11 @@
 #ifndef shm_locks_h_
 #define shm_locks_h_
 
-#include <semaphore.h>
+#include <pthread.h>
 #include <stdint.h>
 
 // Magic number to ensure we open the right SHM segment
-#define MAGIC 0xA5A5
-
-// Name of the SHM
-#define SHM_NAME "/libpod_lock"
+#define MAGIC 0x87D1
 
 // Type for our bitmaps
 typedef uint32_t bitmap_t;
@@ -18,22 +15,28 @@ typedef uint32_t bitmap_t;
 // Struct to hold a single bitmap and associated locks
 typedef struct lock_group {
   bitmap_t bitmap;
-  sem_t locks[BITMAP_SIZE];
+  pthread_mutex_t locks[BITMAP_SIZE];
 } lock_group_t;
 
-// Struct to hold our SHM locks
+// Struct to hold our SHM locks.
+// Unused is required to be 0 in the current implementation. If we ever make
+// changes to this structure in the future, this will be repurposed as a version
+// field.
 typedef struct shm_struct {
   uint16_t magic;
-  sem_t segment_lock;
-  uint32_t num_bitmaps;
-  uint32_t num_locks;
-  lock_group_t locks[];
+  uint16_t unused;
+  pthread_mutex_t segment_lock;
+  uint32_t num_bitmaps;
+  uint32_t num_locks;
+  lock_group_t locks[];
 } shm_struct_t;
 
-size_t compute_shm_size(uint32_t num_bitmaps);
-shm_struct_t *setup_lock_shm(uint32_t num_locks, int *error_code);
-shm_struct_t *open_lock_shm(uint32_t num_locks, int *error_code);
+static size_t compute_shm_size(uint32_t num_bitmaps);
+static int take_mutex(pthread_mutex_t *mutex);
+static int release_mutex(pthread_mutex_t *mutex);
+shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
+shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
 int32_t close_lock_shm(shm_struct_t *shm);
 int64_t allocate_semaphore(shm_struct_t *shm);
 int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
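Putting the pieces together, a hypothetical caller of the C API declared above could look like the sketch below. This is not part of the commit: it assumes the header is available as shm_lock.h, is compiled together with shm_lock.c, and is linked with -lrt -lpthread; the segment name "/example_locks" is made up for illustration, and error handling is abbreviated. Go callers go through the SHMLocks wrapper instead.

    #include <stdio.h>
    #include "shm_lock.h"

    int main(void) {
        int err = 0;

        // Create (or use open_lock_shm() to attach to) a segment with at least
        // 64 locks; the implementation rounds up to a multiple of BITMAP_SIZE.
        shm_struct_t *shm = setup_lock_shm("/example_locks", 64, &err);
        if (shm == NULL) {
            fprintf(stderr, "setup_lock_shm failed: %d\n", err);
            return 1;
        }

        // Grab a free lock index, take it, release it, and give it back.
        int64_t sem = allocate_semaphore(shm);
        if (sem < 0) {
            fprintf(stderr, "allocate_semaphore failed: %ld\n", (long)sem);
            return 1;
        }

        lock_semaphore(shm, (uint32_t)sem);
        // ... critical section ...
        unlock_semaphore(shm, (uint32_t)sem);

        deallocate_semaphore(shm, (uint32_t)sem);
        close_lock_shm(shm);
        return 0;
    }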

View File

@@ -3,6 +3,7 @@ package shm
 import (
 	"fmt"
 	"os"
+	"runtime"
 	"syscall"
 	"testing"
 	"time"
@@ -17,11 +18,13 @@ import (
 // We can at least verify that the locks work within the local process.
 
 // 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
-const numLocks = 128
+const numLocks uint32 = 4 * BitmapSize
+
+const lockPath = "/libpod_test"
 
 // We need a test main to ensure that the SHM is created before the tests run
 func TestMain(m *testing.M) {
-	shmLock, err := CreateSHMLock(numLocks)
+	shmLock, err := CreateSHMLock(lockPath, numLocks)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
 		os.Exit(-1)
@@ -42,19 +45,15 @@ func TestMain(m *testing.M) {
 }
 
 func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
-	locks, err := OpenSHMLock(numLocks)
+	locks, err := OpenSHMLock(lockPath, numLocks)
 	if err != nil {
 		t.Fatalf("Error opening locks: %v", err)
 	}
 	defer func() {
-		// Unlock and deallocate all locks
-		// Ignore EBUSY (lock is already unlocked)
+		// Deallocate all locks
 		// Ignore ENOENT (lock is not allocated)
 		var i uint32
 		for i = 0; i < numLocks; i++ {
-			if err := locks.UnlockSemaphore(i); err != nil && err != syscall.EBUSY {
-				t.Fatalf("Error unlocking semaphore %d: %v", i, err)
-			}
 			if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
 				t.Fatalf("Error deallocating semaphore %d: %v", i, err)
 			}
@@ -73,16 +72,22 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
 	}
 }
 
-// Test that creating an SHM with a bad size fails
-func TestCreateNewSHMBadSize(t *testing.T) {
+// Test that creating an SHM with a bad size rounds up to a good size
+func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
 	// Odd number, not a power of 2, should never be a word size on a system
-	_, err := CreateSHMLock(7)
-	assert.Error(t, err)
+	lock, err := CreateSHMLock("/test1", 7)
+	assert.NoError(t, err)
+
+	assert.Equal(t, lock.GetMaxLocks(), BitmapSize)
+
+	if err := lock.Close(); err != nil {
+		t.Fatalf("Error closing locks: %v", err)
+	}
 }
 
 // Test that creating an SHM with 0 size fails
 func TestCreateNewSHMZeroSize(t *testing.T) {
-	_, err := CreateSHMLock(0)
+	_, err := CreateSHMLock("/test2", 0)
 	assert.Error(t, err)
 }
@@ -241,3 +246,28 @@ func TestLockSemaphoreActuallyLocks(t *testing.T) {
 		assert.True(t, duration.Seconds() > 1.0)
 	})
 }
+
+// Test that locking and unlocking two semaphores succeeds
+// Ensures that runtime.LockOSThread() is doing its job
+func TestLockAndUnlockTwoSemaphore(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		err := locks.LockSemaphore(0)
+		assert.NoError(t, err)
+
+		err = locks.LockSemaphore(1)
+		assert.NoError(t, err)
+
+		err = locks.UnlockSemaphore(1)
+		assert.NoError(t, err)
+
+		// Now yield scheduling
+		// To try and get us on another OS thread
+		runtime.Gosched()
+
+		// And unlock the last semaphore
+		// If we are in a different OS thread, this should fail.
+		// However, runtime.UnlockOSThread() should guarantee we are not
+		err = locks.UnlockSemaphore(0)
+		assert.NoError(t, err)
+	})
+}

View File

@@ -3,13 +3,7 @@
 package lock
 
 import (
-	"fmt"
-	"math"
-	"strconv"
-	"syscall"
-
-	"github.com/pkg/errors"
-	"github.com/projectatomic/libpod/libpod/lock/shm"
+	"github.com/containers/libpod/libpod/lock/shm"
 )
 
 // SHMLockManager manages shared memory locks.
@@ -18,8 +12,8 @@ type SHMLockManager struct {
 }
 
 // NewSHMLockManager makes a new SHMLockManager with the given number of locks.
-func NewSHMLockManager(numLocks uint32) (Manager, error) {
-	locks, err := shm.CreateSHMLock(numLocks)
+func NewSHMLockManager(path string, numLocks uint32) (Manager, error) {
+	locks, err := shm.CreateSHMLock(path, numLocks)
 	if err != nil {
 		return nil, err
 	}
@@ -32,8 +26,8 @@ func NewSHMLockManager(numLocks uint32) (Manager, error) {
 // OpenSHMLockManager opens an existing SHMLockManager with the given number of
 // locks.
-func OpenSHMLockManager(numLocks uint32) (Manager, error) {
-	locks, err := shm.OpenSHMLock(numLocks)
+func OpenSHMLockManager(path string, numLocks uint32) (Manager, error) {
+	locks, err := shm.OpenSHMLock(path, numLocks)
 	if err != nil {
 		return nil, err
 	}
@@ -59,27 +53,9 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
 }
 
 // RetrieveLock retrieves a lock from the manager given its ID.
-func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
-	intID, err := strconv.ParseInt(id, 16, 64)
-	if err != nil {
-		return nil, errors.Wrapf(err, "given ID %q is not a valid SHMLockManager ID - cannot be parsed as int", id)
-	}
-	if intID < 0 {
-		return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - must be positive", id)
-	}
-	if intID > math.MaxUint32 {
-		return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - too large", id)
-	}
-
-	var u32ID uint32 = uint32(intID)
-	if u32ID >= m.locks.GetMaxLocks() {
-		return nil, errors.Wrapf(syscall.EINVAL, "given ID %q is not a valid SHMLockManager ID - too large to fit", id)
-	}
-
+func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
 	lock := new(SHMLock)
-	lock.lockID = u32ID
+	lock.lockID = id
 	lock.manager = m
 
 	return lock, nil
@@ -92,8 +68,8 @@ type SHMLock struct {
 }
 
 // ID returns the ID of the lock.
-func (l *SHMLock) ID() string {
-	return fmt.Sprintf("%x", l.lockID)
+func (l *SHMLock) ID() uint32 {
+	return l.lockID
 }
 
 // Lock acquires the lock.