Merge pull request #2350 from mheon/lock_renumber

Add lock renumbering
OpenShift Merge Robot
2019-02-21 23:11:24 +01:00
committed by GitHub
26 changed files with 569 additions and 70 deletions

View File

@@ -548,3 +548,7 @@ type SystemPruneValues struct {
Force bool
Volume bool
}
type SystemRenumberValues struct {
PodmanCommand
}

View File

@@ -131,5 +131,6 @@ func getTrustSubCommands() []*cobra.Command {
func getSystemSubCommands() []*cobra.Command {
return []*cobra.Command{
_pruneSystemCommand,
_renumberCommand,
}
}

View File

@@ -8,8 +8,17 @@ import (
"github.com/pkg/errors"
)
// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
func GetRuntimeRenumber(c *cliconfig.PodmanCommand) (*libpod.Runtime, error) {
return getRuntime(c, true)
}
// GetRuntime generates a new libpod runtime configured by command line options
func GetRuntime(c *cliconfig.PodmanCommand) (*libpod.Runtime, error) {
return getRuntime(c, false)
}
func getRuntime(c *cliconfig.PodmanCommand, renumber bool) (*libpod.Runtime, error) {
options := []libpod.RuntimeOption{}
storageOpts, volumePath, err := util.GetDefaultStoreOptions()
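The hunk is truncated here, but getRuntime presumably consumes the renumber flag by appending the new WithRenumber option (added in libpod/options.go later in this diff). A minimal sketch of that wiring, assuming the options slice shown above:

if renumber {
	// Hypothetical wiring: ask libpod to renumber locks during init.
	options = append(options, libpod.WithRenumber())
}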

View File

@@ -0,0 +1,49 @@
package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
renumberCommand cliconfig.SystemRenumberValues
renumberDescription = `
podman system renumber
Migrate lock numbers to handle a change in maximum number of locks.
Mandatory after the number of locks in libpod.conf is changed.
`
_renumberCommand = &cobra.Command{
Use: "renumber",
Short: "Migrate lock numbers",
Long: renumberDescription,
RunE: func(cmd *cobra.Command, args []string) error {
renumberCommand.InputArgs = args
renumberCommand.GlobalFlags = MainGlobalOpts
return renumberCmd(&renumberCommand)
},
}
)
func init() {
renumberCommand.Command = _renumberCommand
renumberCommand.SetUsageTemplate(UsageTemplate())
}
func renumberCmd(c *cliconfig.SystemRenumberValues) error {
// We need to pass one extra option to NewRuntime.
// This tells libpod to perform a lock renumber during initialization;
// it is controlled by the renumber argument to getRuntime.
r, err := libpodruntime.GetRuntimeRenumber(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error renumbering locks")
}
if err := r.Shutdown(false); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,29 @@
% podman-system-renumber(1) podman
## NAME
podman\-system\-renumber - Renumber container locks
## SYNOPSIS
**podman system renumber**
## DESCRIPTION
**podman system renumber** renumbers locks used by containers and pods.
Each Podman container and pod is allocated a lock at creation time, up to a maximum number controlled by the **num_locks** parameter in **libpod.conf**.
When all available locks are exhausted, no further containers or pods can be created until existing ones are removed. This can be avoided by increasing the number of available locks in **libpod.conf** and then running **podman system renumber** to prepare the new locks (and reallocate lock numbers to fit the new struct).
**podman system renumber** must be run after any change to **num_locks**; failing to do so results in errors starting Podman, as the number of locks available conflicts with the configured number of locks.
**podman system renumber** can also be used to migrate from Podman 1.0 and earlier, which used a different locking scheme, to the new locking model. This is not strictly required, but it is highly recommended, as deadlocks can otherwise occur.
If possible, avoid calling **podman system renumber** while there are other Podman processes running.
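## EXAMPLE
A typical sequence after raising **num_locks** in **libpod.conf** (illustrative; the command takes no arguments):
**podman system renumber**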
## SEE ALSO
`podman(1)`, `libpod.conf(5)`
## HISTORY
February 2019, Originally compiled by Matt Heon (mheon at redhat dot com)

View File

@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
return ctrs, nil
}
// RewriteContainerConfig rewrites a container's configuration.
// WARNING: This function is DANGEROUS. Do not use without reading the full
// comment on this function in state.go.
func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
if !s.valid {
return ErrDBClosed
}
if !ctr.valid {
return ErrCtrRemoved
}
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
}
db, err := s.getDBCon()
if err != nil {
return err
}
defer s.closeDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
ctrBkt, err := getCtrBucket(tx)
if err != nil {
return err
}
ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
if ctrDB == nil {
ctr.valid = false
return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
}
if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
}
return nil
})
return err
}
// RewritePodConfig rewrites a pod's configuration.
// WARNING: This function is DANGEROUS. Do not use without reading the full
// comment on this function in state.go.
func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if !s.valid {
return ErrDBClosed
}
if !pod.valid {
return ErrPodRemoved
}
newCfgJSON, err := json.Marshal(newCfg)
if err != nil {
return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID())
}
db, err := s.getDBCon()
if err != nil {
return err
}
defer s.closeDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
if err != nil {
return err
}
podDB := podBkt.Bucket([]byte(pod.ID()))
if podDB == nil {
pod.valid = false
return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
}
if err := podDB.Put(configKey, newCfgJSON); err != nil {
return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
}
return nil
})
return err
}
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return ErrDBClosed
}
if !volume.valid {
return ErrVolumeRemoved
}
volName := []byte(volume.Name())
db, err := s.getDBCon()

View File

@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
}
// Get the lock
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
}
volume.lock = lock
volume.runtime = s.runtime
volume.valid = true

View File

@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
return arr, nil
}
// AllContainers retrieves all containers from the state
func (s *InMemoryState) AllContainers() ([]*Container, error) {
ctrs := make([]*Container, 0, len(s.containers))
for _, ctr := range s.containers {
if s.namespace == "" || ctr.config.Namespace == s.namespace {
ctrs = append(ctrs, ctr)
}
}
return ctrs, nil
}
// RewriteContainerConfig rewrites a container's configuration.
// This function is DANGEROUS, even with an in-memory state.
// Please read the full comment on it in state.go before using it.
func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
if !ctr.valid {
return ErrCtrRemoved
}
// If the container does not exist, return error
stateCtr, ok := s.containers[ctr.ID()]
if !ok {
ctr.valid = false
return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
}
stateCtr.config = newCfg
return nil
}
// RewritePodConfig rewrites a pod's configuration.
// This function is DANGEROUS, even with in-memory state.
// Please read the full comment on it in state.go before using it.
func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if !pod.valid {
return ErrPodRemoved
}
// If the pod does not exist, return error
statePod, ok := s.pods[pod.ID()]
if !ok {
pod.valid = false
return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
}
statePod.config = newCfg
return nil
}
// Volume retrieves a volume from its full name
func (s *InMemoryState) Volume(name string) (*Volume, error) {
if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
return allVols, nil
}
// AllContainers retrieves all containers from the state
func (s *InMemoryState) AllContainers() ([]*Container, error) {
ctrs := make([]*Container, 0, len(s.containers))
for _, ctr := range s.containers {
if s.namespace == "" || ctr.config.Namespace == s.namespace {
ctrs = append(ctrs, ctr)
}
}
return ctrs, nil
}
// Pod retrieves a pod from the state from its full ID
func (s *InMemoryState) Pod(id string) (*Pod, error) {
if id == "" {

View File

@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
return m.locks[id], nil
}
// FreeAllLocks frees all locks.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
func (m *InMemoryManager) FreeAllLocks() error {
for _, lock := range m.locks {
lock.allocated = false
}
return nil
}

View File

@@ -24,6 +24,20 @@ type Manager interface {
// The underlying lock MUST be the same as any other lock with the
// same ID.
RetrieveLock(id uint32) (Locker, error)
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// FreeAllLocks frees all allocated locks, in preparation for lock
// reallocation.
// As this deallocates all presently-held locks, this can be very
// dangerous - if there are other processes running that might be
// attempting to allocate new locks and free existing locks, we may
// encounter races leading to an inconsistent state.
// (This is in addition to the fact that FreeAllLocks makes the state
// inconsistent the moment it is used, and a full lock renumbering is
// required to restore consistency.)
// In short, this should only be used as part of unit tests, or lock
// renumbering, where reasonable guarantees about other processes can be
// made.
FreeAllLocks() error
}
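Both lock managers in this PR now carry FreeAllLocks. A conventional compile-time assertion (illustrative, not part of this diff) keeps the implementations in sync with the expanded interface:

// These vars exist only to fail compilation if either manager stops
// satisfying Manager; they are a sketch, not code from this PR.
var _ Manager = (*InMemoryManager)(nil)
var _ Manager = (*SHMLockManager)(nil)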
// Locker is similar to sync.Locker, but provides a method for freeing the lock

View File

@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// terminating NULL byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
// ERANGE is returned for a mismatch between num_locks and the number of locks
// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
int shm_fd;
shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
// Need to check the SHM to see if it's actually our locks
if (shm->magic != MAGIC) {
*error_code = -1 * errno;
*error_code = -1 * EBADF;
goto CLEANUP;
}
if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
*error_code = -1 * errno;
*error_code = -1 * ERANGE;
goto CLEANUP;
}
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
return 0;
}
// Deallocate all semaphores unconditionally.
// Returns negative ERRNO values.
int32_t deallocate_all_semaphores(shm_struct_t *shm) {
int ret_code;
uint i;
if (shm == NULL) {
return -1 * EINVAL;
}
// Lock the mutex controlling access to our shared memory
ret_code = take_mutex(&(shm->segment_lock));
if (ret_code != 0) {
return -1 * ret_code;
}
// Iterate through all bitmaps and reset to unused
for (i = 0; i < shm->num_bitmaps; i++) {
shm->locks[i].bitmap = 0;
}
// Unlock the allocation control mutex
ret_code = release_mutex(&(shm->segment_lock));
if (ret_code != 0) {
return -1 * ret_code;
}
return 0;
}
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and

View File

@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
return nil
}
// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
if !locks.valid {
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
}
retCode := C.deallocate_all_semaphores(locks.lockStruct)
if retCode < 0 {
// Negative errno return from C
return syscall.Errno(-1 * retCode)
}
return nil
}
// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.

View File

@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
int32_t close_lock_shm(shm_struct_t *shm);
int64_t allocate_semaphore(shm_struct_t *shm);
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t deallocate_all_semaphores(shm_struct_t *shm);
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
"syscall"
"testing"
"time"
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
}
defer func() {
// Deallocate all locks
// Ignore ENOENT (lock is not allocated)
var i uint32
for i = 0; i < numLocks; i++ {
if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
t.Fatalf("Error deallocating semaphore %d: %v", i, err)
}
if err := locks.DeallocateAllSemaphores(); err != nil {
t.Fatalf("Error deallocating semaphores: %v", err)
}
if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
})
}
// Test that DeallocateAllSemaphores deallocates all semaphores
func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {
// Allocate a lock
locks1, err := locks.AllocateSemaphore()
assert.NoError(t, err)
// Free all locks
err = locks.DeallocateAllSemaphores()
assert.NoError(t, err)
// Allocate another lock
locks2, err := locks.AllocateSemaphore()
assert.NoError(t, err)
assert.Equal(t, locks1, locks2)
})
}
// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
runLockTest(t, func(t *testing.T, locks *SHMLocks) {

View File

@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
return lock, nil
}
// FreeAllLocks frees all locks in the manager.
// This function is DANGEROUS. Please read the full comment in locks.go before
// trying to use it.
func (m *SHMLockManager) FreeAllLocks() error {
return m.locks.DeallocateAllSemaphores()
}
// SHMLock is an individual shared memory lock.
type SHMLock struct {
lockID uint32

View File

@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
return nil, fmt.Errorf("not supported")
}
// FreeAllLocks is not supported on this platform
func (m *SHMLockManager) FreeAllLocks() error {
return fmt.Errorf("not supported")
}

View File

@@ -394,6 +394,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
}
}
// WithRenumber instructs libpod to perform a lock renumbering while
// initializing. This will handle migrations from early versions of libpod with
// file locks to newer versions with SHM locking, as well as changes in the
// number of configured locks.
func WithRenumber() RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return ErrRuntimeFinalized
}
rt.doRenumber = true
return nil
}
}
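A usage sketch: a caller wanting a one-shot renumbering runtime would presumably pass this option to NewRuntime and shut the runtime down immediately afterwards, mirroring renumberCmd above (illustrative only):

// Build a runtime that renumbers locks during initialization.
rt, err := libpod.NewRuntime(libpod.WithRenumber())
if err != nil {
	return errors.Wrapf(err, "error renumbering locks")
}
// Nothing else to do; release the runtime.
return rt.Shutdown(false)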
// Container Creation Options
// WithShmDir sets the directory that should be mounted on /dev/shm.

View File

@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"sync"
"syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -80,6 +81,7 @@ type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
config *RuntimeConfig
state State
store storage.Store
storageService *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
netPlugin ocicni.CNIPlugin
ociRuntimePath OCIRuntimePath
conmonPath string
valid bool
lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
// unused.
doRenumber bool
// valid indicates whether the runtime is ready to use.
// valid is set to true when a runtime is returned from GetRuntime(),
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
}
// OCIRuntimePath contains information about an OCI runtime.
@@ -753,6 +766,7 @@ func makeRuntime(runtime *Runtime) (err error) {
aliveLock.Unlock()
}
}()
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If the file doesn't exist, we need to refresh the state
@@ -774,6 +788,20 @@
manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
return err
}
} else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
return err
@@ -784,6 +812,15 @@
}
runtime.lockManager = manager
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
if runtime.doRenumber {
if err := runtime.renumberLocks(); err != nil {
return err
}
}
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {

View File

@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
for _, v := range volumes {
if volume, err := runtime.state.Volume(v); err == nil {
if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
logrus.Errorf("cleanup volume (%s): %v", v, err)
}
}

View File

@@ -0,0 +1,57 @@
package libpod
import (
"github.com/pkg/errors"
)
// renumberLocks reassigns lock numbers for all containers and pods in the
// state.
// TODO: It would be desirable to make it impossible to call this until all
// other libpod sessions are dead.
// Possibly use a read-write file lock, with all non-renumber podmans owning the
// lock as read, renumber attempting to take a write lock?
// The alternative is some sort of session tracking, and I don't know how
// reliable that can be.
func (r *Runtime) renumberLocks() error {
// Start off by deallocating all locks
if err := r.lockManager.FreeAllLocks(); err != nil {
return err
}
allCtrs, err := r.state.AllContainers()
if err != nil {
return err
}
for _, ctr := range allCtrs {
lock, err := r.lockManager.AllocateLock()
if err != nil {
return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
}
ctr.config.LockID = lock.ID()
// Write the new lock ID
if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
return err
}
}
allPods, err := r.state.AllPods()
if err != nil {
return err
}
for _, pod := range allPods {
lock, err := r.lockManager.AllocateLock()
if err != nil {
return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
}
pod.config.LockID = lock.ID()
// Write the new lock ID
if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
return err
}
}
return nil
}
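The TODO above proposes a read-write file lock for excluding concurrent libpod sessions. A minimal sketch of that idea using flock(2); the path and helper name are invented for illustration and are not part of libpod:

import (
	"os"
	"syscall"
)

// renumberLockPath is hypothetical; libpod defines no such file.
const renumberLockPath = "/run/libpod/renumber.lock"

// acquireSessionLock takes the file lock shared for ordinary libpod
// sessions and exclusive for a renumbering process, so renumber blocks
// until all other sessions have released their shared locks.
func acquireSessionLock(exclusive bool) (*os.File, error) {
	f, err := os.OpenFile(renumberLockPath, os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		return nil, err
	}
	how := syscall.LOCK_SH
	if exclusive {
		how = syscall.LOCK_EX
	}
	// Flock blocks until the requested mode can be granted.
	if err := syscall.Flock(int(f.Fd()), how); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}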

View File

@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
type VolumeFilter func(*Volume) bool
// RemoveVolume removes a volume
func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool
}
}
v.lock.Lock()
defer v.lock.Unlock()
return r.removeVolume(ctx, v, force, prune)
return r.removeVolume(ctx, v, force)
}
// RemoveVolumes removes a slice of volumes (or all volumes) with a force bool
@@ -64,7 +61,7 @@ func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, forc
}
for _, vol := range vols {
if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
if err := r.RemoveVolume(ctx, vol, force); err != nil {
return deletedVols, err
}
logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
}
for _, vol := range vols {
if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
if err != ErrVolumeBeingUsed {
if err := r.RemoveVolume(ctx, vol, false); err != nil {
if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
pruneErrors = append(pruneErrors, err)
}
continue

View File

@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
lock, err := r.lockManager.AllocateLock()
if err != nil {
return nil, errors.Wrapf(err, "error allocating lock for new volume")
}
volume.lock = lock
volume.config.LockID = volume.lock.ID()
volume.valid = true
// Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
// removeVolume removes the specified volume from the state, and tears down its mountpoint and storage
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
if !v.valid {
return ErrNoSuchVolume
if ok, _ := r.state.HasVolume(v.Name()); !ok {
return nil
}
return ErrVolumeRemoved
}
deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
return err
}
if len(deps) != 0 {
if prune {
return ErrVolumeBeingUsed
}
depsStr := strings.Join(deps, ", ")
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool
}
}
// Delete the mountpoint path of the volume, that is, delete the volume from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
}
// Set volume as invalid so it can no longer be used
v.valid = false
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
// Set volume as invalid so it can no longer be used
v.valid = false
// Delete the mountpoint path of the volume, that is, delete the volume from /var/lib/containers/storage/volumes
if err := v.teardownStorage(); err != nil {
return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
}
logrus.Debugf("Removed volume %s", v.Name())
return nil
}

View File

@@ -97,6 +97,30 @@ type State interface {
// returned.
AllContainers() ([]*Container, error)
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// Rewrite a container's configuration.
// This function breaks libpod's normal prohibition on a read-only
// configuration, and as such should be used EXTREMELY SPARINGLY and
// only in very specific circumstances.
// Specifically, it is ONLY safe to use this function to make changes
// that result in a functionally identical configuration (migrating to
// newer, but identical, configuration fields), or during libpod init
// WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
// being initialized).
// Most things in config can be changed by this, but container ID and
// name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
// potential for database corruption.
// There are a lot of capital letters and conditions here, but the short
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
// PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
// It is subject to the same conditions as RewriteContainerConfig.
// Please do not use this unless you know what you're doing.
RewritePodConfig(pod *Pod, newCfg *PodConfig) error
// Accepts full ID of pod.
// If the pod given is not in the set namespace, an error will be
// returned.

View File

@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
})
}
func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
assert.Error(t, err)
})
}
func TestRewriteContainerConfigNotInState(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
assert.Error(t, err)
})
}
func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
assert.NoError(t, err)
testCtr.config.LogPath = "/another/path/"
err = state.RewriteContainerConfig(testCtr, testCtr.config)
assert.NoError(t, err)
testCtrFromState, err := state.Container(testCtr.ID())
assert.NoError(t, err)
testContainersEqual(t, testCtrFromState, testCtr, true)
})
}
func TestRewritePodConfigDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.RewritePodConfig(&Pod{}, &PodConfig{})
assert.Error(t, err)
})
}
func TestRewritePodConfigNotInState(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.RewritePodConfig(testPod, &PodConfig{})
assert.Error(t, err)
})
}
func TestRewritePodConfigRewritesConfig(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
assert.NoError(t, err)
testPod.config.CgroupParent = "/another_cgroup_parent"
err = state.RewritePodConfig(testPod, testPod.config)
assert.NoError(t, err)
testPodFromState, err := state.Pod(testPod.ID())
assert.NoError(t, err)
testPodsEqual(t, testPodFromState, testPod, true)
})
}
func TestGetPodDoesNotExist(t *testing.T) {
runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")

View File

@@ -1,7 +1,5 @@ package libpod
package libpod
import "github.com/containers/libpod/libpod/lock"
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
valid bool
runtime *Runtime
lock lock.Locker
}
// VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
type VolumeConfig struct {
// Name of the volume
Name string `json:"name"`
// ID of this volume's lock
LockID uint32 `json:"lockID"`
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`

View File

@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
// teardownStorage deletes the volume from volumePath
func (v *Volume) teardownStorage() error {
if !v.valid {
return ErrNoSuchVolume
}
return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
}