Add number of free locks to podman info
This is a nice quality-of-life change that should help to debug situations where someone runs out of locks (usually when a bunch of unused volumes accumulate).

Signed-off-by: Matt Heon <mheon@redhat.com>
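The new count surfaces through podman info: as the e2e test added at the end of this diff does, podman info --format "{{ .Host.FreeLocks }}" prints just the free-lock number.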
@@ -38,6 +38,7 @@ type HostInfo struct {
    DatabaseBackend string `json:"databaseBackend"`
    Distribution DistributionInfo `json:"distribution"`
    EventLogger string `json:"eventLogger"`
    FreeLocks *uint32 `json:"freeLocks,omitempty"`
    Hostname string `json:"hostname"`
    IDMappings IDMappings `json:"idMappings,omitempty"`
    Kernel string `json:"kernel"`
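FreeLocks is a *uint32 rather than a plain integer so that lock backends with no fixed limit can report "no value", and omitempty then drops the field from JSON output entirely when the pointer is nil. A minimal, self-contained sketch of that encoding behavior (illustration only, not podman code; the struct here is a hypothetical stand-in for HostInfo):

package main

import (
    "encoding/json"
    "fmt"
)

// hostInfo is a stand-in for define.HostInfo, reduced to the new field.
type hostInfo struct {
    FreeLocks *uint32 `json:"freeLocks,omitempty"`
}

func main() {
    n := uint32(2048)
    withCount, _ := json.Marshal(hostInfo{FreeLocks: &n})
    unlimited, _ := json.Marshal(hostInfo{}) // nil pointer: field is omitted
    fmt.Println(string(withCount)) // {"freeLocks":2048}
    fmt.Println(string(unlimited)) // {}
}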
@@ -99,6 +99,12 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
    if err != nil {
        return nil, err
    }

    locksFree, err := r.lockManager.AvailableLocks()
    if err != nil {
        return nil, fmt.Errorf("getting free locks: %w", err)
    }

    info := define.HostInfo{
        Arch: runtime.GOARCH,
        BuildahVersion: buildah.Version,
@@ -107,6 +113,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
        CPUs: runtime.NumCPU(),
        CPUUtilization: cpuUtil,
        Distribution: hostDistributionInfo,
        FreeLocks: locksFree,
        LogDriver: r.config.Containers.LogDriver,
        EventLogger: r.eventer.String(),
        Hostname: host,
@@ -79,6 +79,12 @@ func (m *FileLockManager) FreeAllLocks() error {
    return m.locks.DeallocateAllLocks()
}

// AvailableLocks returns the number of available locks. Since this is not
// limited in the file lock implementation, nil is returned.
func (locks *FileLockManager) AvailableLocks() (*uint32, error) {
    return nil, nil
}

// FileLock is an individual shared memory lock.
type FileLock struct {
    lockID uint32
@@ -116,3 +116,16 @@ func (m *InMemoryManager) FreeAllLocks() error {

    return nil
}

// Get number of available locks
func (m *InMemoryManager) AvailableLocks() (*uint32, error) {
    var count uint32

    for _, lock := range m.locks {
        if !lock.allocated {
            count++
        }
    }

    return &count, nil
}
@@ -45,6 +45,12 @@ type Manager interface {
    // renumbering, where reasonable guarantees about other processes can be
    // made.
    FreeAllLocks() error
    // AvailableLocks gets the number of remaining locks available to be
    // allocated.
    // Some lock managers do not have a maximum number of locks, and can
    // allocate an unlimited number. These implementations should return
    // a nil uint32.
    AvailableLocks() (*uint32, error)
}

// Locker is similar to sync.Locker, but provides a method for freeing the lock
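Both halves of this contract appear in the commit: the file-lock manager returns a nil pointer (no limit), while the in-memory and SHM managers return a real count. A minimal sketch of how a caller might render the two cases (hypothetical helper, not part of the commit; the Manager is stood in for by a function with the matching signature so the example stays self-contained):

package main

import (
    "fmt"
    "strconv"
)

// availableLocksFunc mirrors the AvailableLocks signature added to Manager.
type availableLocksFunc func() (*uint32, error)

// describeFreeLocks renders the result for display: a nil count means the
// backend imposes no limit on the number of locks.
func describeFreeLocks(available availableLocksFunc) (string, error) {
    free, err := available()
    if err != nil {
        return "", fmt.Errorf("getting free locks: %w", err)
    }
    if free == nil {
        return "unlimited", nil
    }
    return strconv.FormatUint(uint64(*free), 10), nil
}

func main() {
    n := uint32(2047)
    counted, _ := describeFreeLocks(func() (*uint32, error) { return &n, nil })    // SHM-style backend
    unbounded, _ := describeFreeLocks(func() (*uint32, error) { return nil, nil }) // file-lock-style backend
    fmt.Println(counted, unbounded) // 2047 unlimited
}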
@@ -537,3 +537,53 @@ int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {

    return -1 * release_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
}

// Get the number of free locks.
// Returns a positive integer guaranteed to be less than UINT32_MAX on success,
// or negative errno values on failure.
// On success, the returned integer is the number of free semaphores.
int64_t available_locks(shm_struct_t *shm) {
    int ret_code, i, count;
    bitmap_t test_map;
    int64_t free_locks = 0;

    if (shm == NULL) {
        return -1 * EINVAL;
    }

    // Lock the semaphore controlling access to the SHM segment.
    // This isn't strictly necessary as we're only reading, but it seems safer.
    ret_code = take_mutex(&(shm->segment_lock));
    if (ret_code != 0) {
        return -1 * ret_code;
    }

    // Loop through all bitmaps, counting number of allocated locks.
    for (i = 0; i < shm->num_bitmaps; i++) {
        // Short-circuit to catch fully-empty bitmaps quick.
        if (shm->locks[i].bitmap == 0) {
            free_locks += 32;
            continue;
        }

        // Use Kernighan's Algorithm to count bits set. Subtract from number of bits
        // in the integer to get free bits, and thus free lock count.
        test_map = shm->locks[i].bitmap;
        count = 0;
        while (test_map) {
            test_map = test_map & (test_map - 1);
            count++;
        }

        free_locks += 32 - count;
    }

    // Clear the mutex
    ret_code = release_mutex(&(shm->segment_lock));
    if (ret_code != 0) {
        return -1 * ret_code;
    }

    // Return free lock count.
    return free_locks;
}
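The return type is int64_t so that the full uint32 range of lock counts and negative errno values can share one return value. The per-bitmap counting step, as a small self-contained Go sketch (an illustration of the arithmetic, not code from this commit); each bitmap covers 32 locks and a set bit marks an allocated lock:

package main

import "fmt"

// freeInBitmap counts the unset bits of one 32-lock allocation bitmap using
// Kernighan's algorithm: each iteration clears the lowest set bit.
func freeInBitmap(bitmap uint32) int64 {
    allocated := 0
    for bitmap != 0 {
        bitmap &= bitmap - 1
        allocated++
    }
    return int64(32 - allocated)
}

func main() {
    fmt.Println(freeInBitmap(0))          // empty bitmap: 32 free
    fmt.Println(freeInBitmap(0b1011))     // 3 locks allocated: 29 free
    fmt.Println(freeInBitmap(^uint32(0))) // full bitmap: 0 free
}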
@@ -266,6 +266,21 @@ func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
    return nil
}

// GetFreeLocks gets the number of locks available to be allocated.
func (locks *SHMLocks) GetFreeLocks() (uint32, error) {
    if !locks.valid {
        return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
    }

    retCode := C.available_locks(locks.lockStruct)
    if retCode < 0 {
        // Negative errno returned
        return 0, syscall.Errno(-1 * retCode)
    }

    return uint32(retCode), nil
}

func unlinkSHMLock(path string) error {
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
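GetFreeLocks follows the same convention as the rest of the SHM bindings: a non-negative return from C is the count, a negative return is a negated errno. A standalone sketch of that decoding step (hypothetical helper for illustration; the real method does this inline around the cgo call above):

package main

import (
    "fmt"
    "syscall"
)

// decodeSHMReturn converts the C-side convention (non-negative count, or a
// negated errno on failure) back into Go values.
func decodeSHMReturn(ret int64) (uint32, error) {
    if ret < 0 {
        return 0, syscall.Errno(-ret)
    }
    return uint32(ret), nil
}

func main() {
    fmt.Println(decodeSHMReturn(2048))                   // 2048 <nil>
    fmt.Println(decodeSHMReturn(-int64(syscall.EINVAL))) // 0 invalid argument
}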
@@ -41,5 +41,6 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int64_t available_locks(shm_struct_t *shm);

#endif
@@ -98,6 +98,16 @@ func (m *SHMLockManager) FreeAllLocks() error {
    return m.locks.DeallocateAllSemaphores()
}

// AvailableLocks returns the number of free locks in the manager.
func (m *SHMLockManager) AvailableLocks() (*uint32, error) {
    avail, err := m.locks.GetFreeLocks()
    if err != nil {
        return nil, err
    }

    return &avail, nil
}

// SHMLock is an individual shared memory lock.
type SHMLock struct {
    lockID uint32
@@ -6,6 +6,7 @@ import (
    "os/exec"
    "os/user"
    "path/filepath"
    "strconv"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
@@ -175,4 +176,27 @@ var _ = Describe("Podman Info", func() {
        Expect(session).To(Exit(0))
        Expect(session.OutputToString()).To(Equal(want))
    })

    It("Podman info: check lock count", func() {
        // This should not run on architectures and OSes that use the file locks backend.
        // Which, for now, is Linux + RISCV and FreeBSD, neither of which are in CI - so
        // no skips.
        info1 := podmanTest.Podman([]string{"info", "--format", "{{ .Host.FreeLocks }}"})
        info1.WaitWithDefaultTimeout()
        Expect(info1).To(Exit(0))
        free1, err := strconv.Atoi(info1.OutputToString())
        Expect(err).To(BeNil())

        ctr := podmanTest.Podman([]string{"create", ALPINE, "top"})
        ctr.WaitWithDefaultTimeout()
        Expect(ctr).To(Exit(0))

        info2 := podmanTest.Podman([]string{"info", "--format", "{{ .Host.FreeLocks }}"})
        info2.WaitWithDefaultTimeout()
        Expect(info2).To(Exit(0))
        free2, err := strconv.Atoi(info2.OutputToString())
        Expect(err).To(BeNil())

        Expect(free1).To(Equal(free2 + 1))
    })
})
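The final assertion relies on the fact that creating a container allocates exactly one lock, so the free count reported by the second podman info call should be exactly one lower than the first.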