Address review feedback and add manpage notes
The inspect format for `.LockNumber` needed to be documented.

Signed-off-by: Matt Heon <mheon@redhat.com>
@@ -43,7 +43,7 @@ func runLocks() error {
 	if len(report.LockConflicts) > 0 {
 		fmt.Printf("\nLock conflicts have been detected. Recommend immediate use of `podman system renumber` to resolve.\n\n")
 	} else {
-		fmt.Printf("\nNo lock conflicts have been detected, system safe from deadlocks.\n\n")
+		fmt.Printf("\nNo lock conflicts have been detected.\n\n")
 	}
 
 	for _, lockNum := range report.LocksHeld {
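For orientation, here is a minimal sketch of the data this command appears to work with. The struct name is invented; the field types follow the `map[uint32][]string` and `[]uint32` values returned by `LockConflicts()` in the runtime hunk further down.

```go
package main

import "fmt"

// locksReport is a hypothetical stand-in for the report consumed by runLocks();
// the name is invented, but the field types match the map[uint32][]string and
// []uint32 returned by LockConflicts() in the runtime change below.
type locksReport struct {
	LockConflicts map[uint32][]string // lock number -> every object sharing that lock
	LocksHeld     []uint32            // allocated locks with a single holder
}

func main() {
	report := locksReport{LocksHeld: []uint32{3, 9}}

	// Mirrors the branch whose message is reworded in the hunk above.
	if len(report.LockConflicts) > 0 {
		fmt.Printf("\nLock conflicts have been detected. Recommend immediate use of `podman system renumber` to resolve.\n\n")
	} else {
		fmt.Printf("\nNo lock conflicts have been detected.\n\n")
	}

	for _, lockNum := range report.LocksHeld {
		fmt.Println("lock held:", lockNum) // placeholder output; the real command prints its own per-lock line
	}
}
```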
@@ -43,6 +43,7 @@ Valid placeholders for the Go template are listed below:
 | .IsInfra | Is this an infra container? (string: true/false) |
 | .IsService | Is this a service container? (string: true/false) |
 | .KubeExitCodePropagation | Kube exit-code propagation (string) |
+| .LockNumber | Number of the container's Libpod lock |
 | .MountLabel | SELinux label of mount (string) |
 | .Mounts | Mounts (array of strings) |
 | .Name | Container name (string) |
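As a reminder of how these placeholders are consumed, `--format` values are executed as Go text/templates against the inspect data, so the new `.LockNumber` field can be selected like any other exported field. A minimal sketch, using a stand-in struct rather than podman's real inspect type:

```go
package main

import (
	"os"
	"text/template"
)

// inspectData is a stand-in for the real inspect structure; only the fields
// used by the template below are included.
type inspectData struct {
	Name       string
	LockNumber uint32
}

func main() {
	// Equivalent in spirit to:
	//   podman container inspect --format '{{.Name}}: lock {{.LockNumber}}' mycontainer
	tmpl := template.Must(template.New("format").Parse("{{.Name}}: lock {{.LockNumber}}\n"))
	_ = tmpl.Execute(os.Stdout, inspectData{Name: "mycontainer", LockNumber: 12})
}
```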
@@ -44,6 +44,7 @@ Valid placeholders for the Go template are listed below:
 | .InfraContainerID | Pod infrastructure ID |
 | .InspectPodData ... | Nested structure, for experts only |
 | .Labels | Pod labels |
+| .LockNumber | Number of the pod's Libpod lock |
 | .MemoryLimit | Memory limit, bytes |
 | .MemorySwap | Memory swap limit, in bytes |
 | .Mounts | Mounts |
@@ -33,6 +33,7 @@ Valid placeholders for the Go template are listed below:
 | .Driver | Volume driver |
 | .GID | GID the volume was created with |
 | .Labels | Label information associated with the volume |
+| .LockNumber | Number of the volume's Libpod lock |
 | .MountCount | Number of times the volume is mounted |
 | .Mountpoint | Source of volume mount point |
 | .Name | Volume name |
@@ -42,6 +42,7 @@ Valid placeholders for the Go template are listed below:
 | .GID | GID of volume |
 | .InspectVolumeData ... | Don't use |
 | .Labels | Label information associated with the volume |
+| .LockNumber | Number of the volume's Libpod lock |
 | .MountCount | Number of times the volume is mounted |
 | .Mountpoint | Source of volume mount point |
 | .Name | Volume name |
@@ -568,7 +568,7 @@ int64_t available_locks(shm_struct_t *shm) {
   for (i = 0; i < shm->num_bitmaps; i++) {
     // Short-circuit to catch fully-empty bitmaps quick.
     if (shm->locks[i].bitmap == 0) {
-      free_locks += 32;
+      free_locks += sizeof(bitmap_t) * 8;
       continue;
     }
 
@@ -581,7 +581,7 @@ int64_t available_locks(shm_struct_t *shm) {
       count++;
     }
 
-    free_locks += 32 - count;
+    free_locks += (sizeof(bitmap_t) * 8) - count;
   }
 
   // Clear the mutex
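The two C changes above replace a hard-coded 32 with `sizeof(bitmap_t) * 8`, so the free-lock arithmetic stays correct if the bitmap type ever changes width. A rough Go translation of the per-bitmap math, with `uint32` standing in for `bitmap_t`:

```go
package main

import (
	"fmt"
	"math/bits"
	"unsafe"
)

// bitmapT stands in for the C bitmap_t type; the point of the change is that
// the bit width is derived from the type's size, not hard-coded as 32.
type bitmapT = uint32

// freeLocksIn mirrors the per-bitmap arithmetic in available_locks(): every
// zero bit in the bitmap is a free lock.
func freeLocksIn(bm bitmapT) uint64 {
	bitsPerBitmap := uint64(unsafe.Sizeof(bm)) * 8 // sizeof(bitmap_t) * 8
	taken := uint64(bits.OnesCount32(uint32(bm)))
	return bitsPerBitmap - taken
}

func main() {
	fmt.Println(freeLocksIn(0))      // fully-empty bitmap: 32 free
	fmt.Println(freeLocksIn(0b1011)) // three bits set: 29 free
}
```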
@@ -1206,12 +1206,7 @@ func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) {
 	for _, ctr := range ctrs {
 		lockNum := ctr.lock.ID()
 		ctrString := fmt.Sprintf("container %s", ctr.ID())
-		locksArr, ok := locksInUse[lockNum]
-		if ok {
-			locksInUse[lockNum] = append(locksArr, ctrString)
-		} else {
-			locksInUse[lockNum] = []string{ctrString}
-		}
+		locksInUse[lockNum] = append(locksInUse[lockNum], ctrString)
 	}
 
 	pods, err := r.state.AllPods()
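The five-line removal works because of a Go property worth spelling out: indexing a map with a missing key returns the zero value, here a nil slice, and `append` on a nil slice allocates a new one. A single `append` therefore covers both the first holder of a lock and every subsequent one; a minimal demonstration:

```go
package main

import "fmt"

func main() {
	locksInUse := make(map[uint32][]string)

	// Key 7 does not exist yet: locksInUse[7] is a nil slice, append allocates.
	locksInUse[7] = append(locksInUse[7], "container aaa")
	// Key 7 now exists: append extends the existing slice.
	locksInUse[7] = append(locksInUse[7], "pod bbb")

	fmt.Println(locksInUse[7]) // [container aaa pod bbb]
}
```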
@@ -1221,12 +1216,7 @@ func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) {
 	for _, pod := range pods {
 		lockNum := pod.lock.ID()
 		podString := fmt.Sprintf("pod %s", pod.ID())
-		locksArr, ok := locksInUse[lockNum]
-		if ok {
-			locksInUse[lockNum] = append(locksArr, podString)
-		} else {
-			locksInUse[lockNum] = []string{podString}
-		}
+		locksInUse[lockNum] = append(locksInUse[lockNum], podString)
 	}
 
 	volumes, err := r.state.AllVolumes()
@@ -1236,12 +1226,7 @@ func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) {
 	for _, vol := range volumes {
 		lockNum := vol.lock.ID()
 		volString := fmt.Sprintf("volume %s", vol.Name())
-		locksArr, ok := locksInUse[lockNum]
-		if ok {
-			locksInUse[lockNum] = append(locksArr, volString)
-		} else {
-			locksInUse[lockNum] = []string{volString}
-		}
+		locksInUse[lockNum] = append(locksInUse[lockNum], volString)
 	}
 
 	// Now go through and find any entries with >1 item associated
@@ -177,7 +177,7 @@ var _ = Describe("Podman Info", func() {
 		Expect(session.OutputToString()).To(Equal(want))
 	})
 
-	It("Podman info: check lock count", func() {
+	It("Podman info: check lock count", Serial, func() {
 		// This should not run on architectures and OSes that use the file locks backend.
 		// Which, for now, is Linux + RISCV and FreeBSD, neither of which are in CI - so
 		// no skips.
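Marking the spec `Serial` is presumably needed because the assertion below compares free-lock counts taken before and after creating a single container; a spec running in parallel could allocate or release locks in between and throw the expected delta of one off.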
@@ -197,6 +197,9 @@ var _ = Describe("Podman Info", func() {
 		free2, err := strconv.Atoi(info2.OutputToString())
 		Expect(err).To(Not(HaveOccurred()))
 
+		// Effectively, we are checking that 1 lock has been taken.
+		// We do this by comparing the number of locks after (plus 1), to the number of locks before.
+		// Don't check absolute numbers because there is a decent chance of contamination, containers that were never removed properly, etc.
 		Expect(free1).To(Equal(free2 + 1))
 	})
 })