Mirror of https://github.com/containers/podman.git
Merge pull request #2774 from mheon/db_rework_named_volume
Rework named volumes in DB
@@ -21,7 +21,8 @@ Remove all volumes.

**-f**, **--force**=""

Remove a volume by force, even if it is being used by a container
Remove a volume by force.
If it is being used by containers, the containers will be removed first.

**--help**
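For illustration, a forced removal of an in-use volume (the volume name here is hypothetical) could be invoked as:

    podman volume rm --force myvol

With **--force**, any containers using the volume are removed first, and then the volume itself is deleted.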
@@ -1358,56 +1358,6 @@ func (s *BoltState) AddVolume(volume *Volume) error {
return err
}

// RemoveVolCtrDep updates the container dependencies sub bucket of the given volume.
// It deletes it from the bucket when found.
// This is important when force removing a volume and we want to get rid of the dependencies.
func (s *BoltState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
if ctrID == "" {
return nil
}

if !s.valid {
return ErrDBBadConfig
}

if !volume.valid {
return ErrVolumeRemoved
}

volName := []byte(volume.Name())

db, err := s.getDBCon()
if err != nil {
return err
}
defer s.closeDBCon(db)

err = db.Update(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
if err != nil {
return err
}

volDB := volBkt.Bucket(volName)
if volDB == nil {
volume.valid = false
return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volume.Name())
}

// Make a subbucket for the containers using the volume
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
depCtrID := []byte(ctrID)
if depExists := ctrDepsBkt.Get(depCtrID); depExists != nil {
if err := ctrDepsBkt.Delete(depCtrID); err != nil {
return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctrID, volume.Name())
}
}

return nil
})
return err
}

// RemoveVolume removes the given volume from the state
func (s *BoltState) RemoveVolume(volume *Volume) error {
if !s.valid {

@@ -1433,6 +1383,11 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
return err
}

ctrBkt, err := getCtrBucket(tx)
if err != nil {
return err
}

// Check if the volume exists
volDB := volBkt.Bucket(volName)
if volDB == nil {

@@ -1448,6 +1403,18 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
if volCtrsBkt != nil {
var deps []string
err = volCtrsBkt.ForEach(func(id, value []byte) error {
// Alright, this is ugly.
// But we need it to work around the change in
// volume dependency handling, to make sure that
// older Podman versions don't cause DB
// corruption.
// Look up all dependencies and see that they
// still exist before appending.
ctrExists := ctrBkt.Bucket(id)
if ctrExists == nil {
return nil
}

deps = append(deps, string(id))
return nil
})

@@ -1629,6 +1596,11 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
return err
}

ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
}

volDB := volBucket.Bucket([]byte(volume.Name()))
if volDB == nil {
volume.valid = false

@@ -1642,6 +1614,13 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {

// Iterate through and add dependencies
err = dependsBkt.ForEach(func(id, value []byte) error {
// Look up all dependencies and see that they
// still exist before appending.
ctrExists := ctrBucket.Bucket(id)
if ctrExists == nil {
return nil
}

depCtrs = append(depCtrs, string(id))

return nil
@@ -564,23 +564,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
}

// Add container to volume dependencies bucket if container is using a named volume
if ctr.runtime.config.VolumePath == "" {
return nil
}
for _, vol := range ctr.config.Spec.Mounts {
if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
volDB := volBkt.Bucket([]byte(volName))
if volDB == nil {
return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database", volName)
}
// Add container to named volume dependencies buckets
for _, vol := range ctr.config.NamedVolumes {
volDB := volBkt.Bucket([]byte(vol.Name))
if volDB == nil {
return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
}

ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
return errors.Wrapf(err, "error storing container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
}
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
}
}
}

@@ -745,22 +739,19 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}

// Remove container from volume dependencies bucket if container is using a named volume
for _, vol := range ctr.config.Spec.Mounts {
if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
// Remove container from named volume dependencies buckets
for _, vol := range ctr.config.NamedVolumes {
volDB := volBkt.Bucket([]byte(vol.Name))
if volDB == nil {
// Let's assume the volume was already deleted and
// continue to remove the container
continue
}

volDB := volBkt.Bucket([]byte(volName))
if volDB == nil {
// Let's assume the volume was already deleted and continue to remove the container
continue
}

ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if depExists := ctrDepsBkt.Get(ctrID); depExists != nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
return errors.Wrapf(err, "error deleting container dependencies %q for volume %s in ctrDependencies bucket in DB", ctr.ID(), volName)
}
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
if err := ctrDepsBkt.Delete(ctrID); err != nil {
return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
}
}
}
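To make the bucket structure these helpers maintain easier to follow, here is a minimal, hypothetical sketch of reading the same layout with the bolt API used above. The top-level bucket name ("vol") and the dependency sub-bucket name ("ctrDependencies") are assumptions for illustration only; libpod resolves them through getVolBucket and the volDependenciesBkt constant.

    package main

    import (
        "fmt"

        bolt "github.com/boltdb/bolt"
    )

    // listVolumeDeps walks: volumes bucket -> <volume name> sub-bucket ->
    // ctrDependencies sub-bucket, collecting the container IDs stored there.
    func listVolumeDeps(dbPath, volName string) ([]string, error) {
        db, err := bolt.Open(dbPath, 0600, nil)
        if err != nil {
            return nil, err
        }
        defer db.Close()

        var deps []string
        err = db.View(func(tx *bolt.Tx) error {
            volBkt := tx.Bucket([]byte("vol")) // assumed top-level volumes bucket name
            if volBkt == nil {
                return fmt.Errorf("no volumes bucket in DB")
            }
            volDB := volBkt.Bucket([]byte(volName))
            if volDB == nil {
                return fmt.Errorf("no volume %q in DB", volName)
            }
            depsBkt := volDB.Bucket([]byte("ctrDependencies")) // assumed sub-bucket name
            if depsBkt == nil {
                return nil // no containers depend on this volume
            }
            return depsBkt.ForEach(func(id, _ []byte) error {
                deps = append(deps, string(id))
                return nil
            })
        })
        return deps, err
    }

    func main() {
        deps, err := listVolumeDeps("/tmp/bolt_state.db", "myvol")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("containers using the volume:", deps)
    }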
@@ -234,6 +234,8 @@ type ContainerConfig struct {
// These include the SHM mount.
// These must be unmounted before the container's rootfs is unmounted.
Mounts []string `json:"mounts,omitempty"`
// NamedVolumes lists the named volumes to mount into the container.
NamedVolumes []*ContainerNamedVolume `json:"namedVolumes,omitempty"`

// Security Config

@@ -354,9 +356,6 @@ type ContainerConfig struct {
// ExitCommand is the container's exit command.
// This Command will be executed when the container exits
ExitCommand []string `json:"exitCommand,omitempty"`
// LocalVolumes are the built-in volumes we get from the --volumes-from flag
// It picks up the built-in volumes of the container used by --volumes-from
LocalVolumes []spec.Mount
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`

@@ -368,6 +367,18 @@ type ContainerConfig struct {
HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"`
}

// ContainerNamedVolume is a named volume that will be mounted into the
// container. Each named volume is a libpod Volume present in the state.
type ContainerNamedVolume struct {
// Name is the name of the volume to mount in.
// Must resolve to a valid volume present in this Podman.
Name string `json:"volumeName"`
// Dest is the mount's destination
Dest string `json:"dest"`
// Options are fstab style mount options
Options []string `json:"options,omitempty"`
}

// ContainerStatus returns a string representation for users
// of a container state
func (t ContainerStatus) String() string {

@@ -488,6 +499,22 @@ func (c *Container) StaticDir() string {
return c.config.StaticDir
}

// NamedVolumes returns the container's named volumes.
// The name of each is guaranteed to point to a valid libpod Volume present in
// the state.
func (c *Container) NamedVolumes() []*ContainerNamedVolume {
volumes := []*ContainerNamedVolume{}
for _, vol := range c.config.NamedVolumes {
newVol := new(ContainerNamedVolume)
newVol.Name = vol.Name
newVol.Dest = vol.Dest
newVol.Options = vol.Options
volumes = append(volumes, newVol)
}

return volumes
}

// Privileged returns whether the container is privileged
func (c *Container) Privileged() bool {
return c.config.Privileged
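A brief usage sketch for the new accessor, assuming caller code that already holds a *libpod.Container named ctr:

    for _, v := range ctr.NamedVolumes() {
        fmt.Printf("volume %s mounted at %s with options %v\n", v.Name, v.Dest, v.Options)
    }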
@@ -1403,22 +1403,6 @@ func getExcludedCGroups() (excludes []string) {
return
}

// namedVolumes returns named volumes for the container
func (c *Container) namedVolumes() ([]string, error) {
var volumes []string
for _, vol := range c.config.Spec.Mounts {
if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
split := strings.Split(volume, "/")
volume = split[0]
if _, err := c.runtime.state.Volume(volume); err == nil {
volumes = append(volumes, volume)
}
}
}
return volumes, nil
}

// this should be from chrootarchive.
func (c *Container) copyWithTarFromImage(src, dest string) error {
mountpoint, err := c.mount()

@@ -195,6 +195,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err := c.makeBindMounts(); err != nil {
return nil, err
}

// Check if the spec file mounts contain the label Relabel flags z or Z.
// If they do, relabel the source directory and then remove the option.
for i := range g.Config.Mounts {

@@ -218,6 +219,23 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {

g.SetProcessSelinuxLabel(c.ProcessLabel())
g.SetLinuxMountLabel(c.MountLabel())

// Add named volumes
for _, namedVol := range c.config.NamedVolumes {
volume, err := c.runtime.GetVolume(namedVol.Name)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
}
mountPoint := volume.MountPoint()
volMount := spec.Mount{
Type: "bind",
Source: mountPoint,
Destination: namedVol.Dest,
Options: namedVol.Options,
}
g.AddMount(volMount)
}

// Add bind mounts to container
for dstPath, srcPath := range c.state.BindMounts {
newMount := spec.Mount{
@@ -249,11 +249,8 @@ func (s *InMemoryState) AddContainer(ctr *Container) error {
}

// Add container to volume dependencies
for _, vol := range ctr.config.Spec.Mounts {
if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
s.addCtrToVolDependsMap(ctr.ID(), volName)
}
for _, vol := range ctr.config.NamedVolumes {
s.addCtrToVolDependsMap(ctr.ID(), vol.Name)
}

return nil

@@ -306,12 +303,9 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
s.removeCtrFromDependsMap(ctr.ID(), depCtr)
}

// Remove container from volume dependencies
for _, vol := range ctr.config.Spec.Mounts {
if strings.Contains(vol.Source, ctr.runtime.config.VolumePath) {
volName := strings.Split(vol.Source[len(ctr.runtime.config.VolumePath)+1:], "/")[0]
s.removeCtrFromVolDependsMap(ctr.ID(), volName)
}
// Remove this container from volume dependencies
for _, vol := range ctr.config.NamedVolumes {
s.removeCtrFromVolDependsMap(ctr.ID(), vol.Name)
}

return nil

@@ -492,22 +486,6 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error {
return nil
}

// RemoveVolCtrDep updates the container dependencies of the volume
func (s *InMemoryState) RemoveVolCtrDep(volume *Volume, ctrID string) error {
if !volume.valid {
return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name())
}

if _, ok := s.volumes[volume.Name()]; !ok {
return errors.Wrapf(ErrNoSuchVolume, "volume with name %s doesn't exists in state", volume.Name())
}

// Remove container that is using this volume
s.removeCtrFromVolDependsMap(ctrID, volume.Name())

return nil
}

// VolumeInUse checks if the given volume is being used by at least one container
func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) {
if !volume.valid {
@@ -13,7 +13,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)

@@ -1111,24 +1110,6 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
}
}

// WithLocalVolumes sets the built-in volumes of the container retrieved
// from a container passed in to the --volumes-from flag.
// This stores the built-in volume information in the Config so we can
// add them when creating the container.
func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
}

if volumes != nil {
ctr.config.LocalVolumes = append(ctr.config.LocalVolumes, volumes...)
}

return nil
}
}

// WithEntrypoint sets the entrypoint of the container.
// This is not used to change the container's spec, but will instead be used
// during commit to populate the entrypoint of the new image.

@@ -1255,6 +1236,35 @@ func withIsInfra() CtrCreateOption {
}
}

// WithNamedVolumes adds the given named volumes to the container.
func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
}

destinations := make(map[string]bool)

for _, vol := range volumes {
// Don't check if they already exist.
// If they don't we will automatically create them.

if _, ok := destinations[vol.Dest]; ok {
return errors.Wrapf(ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
}
destinations[vol.Dest] = true

ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
Name: vol.Name,
Dest: vol.Dest,
Options: vol.Options,
})
}

return nil
}
}
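A minimal sketch of wiring this option into container creation from caller code; the runtime r, context ctx, and OCI spec rSpec are assumed to already exist, and the volume name and destination are placeholders:

    vols := []*libpod.ContainerNamedVolume{
        {
            Name:    "myvol",        // created on demand if it does not exist yet
            Dest:    "/data",        // mount destination inside the container
            Options: []string{"rw"}, // fstab-style options
        },
    }
    ctr, err := r.NewContainer(ctx, rSpec, libpod.WithNamedVolumes(vols))
    if err != nil {
        return err
    }
    fmt.Println("created container", ctr.ID())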
// Volume Creation Options

// WithVolumeName sets the name of the volume.

@@ -1274,28 +1284,6 @@ func WithVolumeName(name string) VolumeCreateOption {
}
}

// WithVolumeUID sets the uid of the owner.
func WithVolumeUID(uid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
volume.config.UID = uid
return nil
}
}

// WithVolumeGID sets the gid of the owner.
func WithVolumeGID(gid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}
volume.config.GID = gid
return nil
}
}

// WithVolumeLabels sets the labels of the volume.
func WithVolumeLabels(labels map[string]string) VolumeCreateOption {
return func(volume *Volume) error {

@@ -1341,6 +1329,32 @@ func WithVolumeOptions(options map[string]string) VolumeCreateOption {
}
}

// WithVolumeUID sets the UID that the volume will be created as.
func WithVolumeUID(uid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}

volume.config.UID = uid

return nil
}
}

// WithVolumeGID sets the GID that the volume will be created as.
func WithVolumeGID(gid int) VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return ErrVolumeFinalized
}

volume.config.GID = gid

return nil
}
}

// withSetCtrSpecific sets a bool notifying libpod that a volume was created
// specifically for a container.
// These volumes will be removed when the container is removed and volumes are
@@ -99,9 +99,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.state.State = ContainerStateConfigured
ctr.runtime = r

ctr.valid = true
ctr.state.State = ContainerStateConfigured

var pod *Pod
if ctr.config.Pod != "" {
// Get the pod from state

@@ -173,24 +170,29 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.config.ConmonPidFile = filepath.Join(ctr.config.StaticDir, "conmon.pid")
}

// Go through the volume mounts and check for named volumes
// If the named volme already exists continue, otherwise create
// the storage for the named volume.
for i, vol := range ctr.config.Spec.Mounts {
if vol.Source[0] != '/' && isNamedVolume(vol.Source) {
volInfo, err := r.state.Volume(vol.Source)
if err != nil {
newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source), withSetCtrSpecific(), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
if err != nil {
return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
}
ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to copy content into new volume mount %q", vol.Source)
}
continue
}
ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
// Go through named volumes and add them.
// If they don't exist they will be created using basic options.
for _, vol := range ctr.config.NamedVolumes {
// Check if it exists already
_, err := r.state.Volume(vol.Name)
if err == nil {
// The volume exists, we're good
continue
} else if errors.Cause(err) != ErrNoSuchVolume {
return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
}

logrus.Debugf("Creating new volume %s for container", vol.Name)

// The volume does not exist, so we need to create it.
newVol, err := r.newVolume(ctx, WithVolumeName(vol.Name), withSetCtrSpecific(),
WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
if err != nil {
return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name)
}

if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name)
}
}
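For comparison, a caller can pre-create such a volume instead of relying on the on-demand path above; a hedged sketch assuming a configured *libpod.Runtime r (the UID/GID values are placeholders):

    vol, err := r.NewVolume(ctx,
        libpod.WithVolumeName("myvol"),
        libpod.WithVolumeUID(1000),
        libpod.WithVolumeGID(1000),
    )
    if err != nil {
        return err
    }
    fmt.Println("volume mount point:", vol.MountPoint())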
@@ -344,13 +346,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
}

var volumes []string
if removeVolume {
volumes, err = c.namedVolumes()
if err != nil {
logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
}
}
var cleanupErr error
// Remove the container from the state
if c.config.Pod != "" {

@@ -415,8 +410,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}

for _, v := range volumes {
if volume, err := runtime.state.Volume(v); err == nil {
if !removeVolume {
return cleanupErr
}

for _, v := range c.config.NamedVolumes {
if volume, err := runtime.state.Volume(v.Name); err == nil {
if !volume.IsCtrSpecific() {
continue
}

@@ -548,14 +547,6 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
return ctrs[lastCreatedIndex], nil
}

// Check if volName is a named volume and not one of the default mounts we add to containers
func isNamedVolume(volName string) bool {
if volName != "proc" && volName != "tmpfs" && volName != "devpts" && volName != "shm" && volName != "mqueue" && volName != "sysfs" && volName != "cgroup" {
return true
}
return false
}

// Export is the libpod portion of exporting a container to a tar file
func (r *Runtime) Export(name string, path string) error {
ctr, err := r.LookupContainer(name)

@@ -98,12 +98,26 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if !force {
return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
}
// If using force, log the warning that the volume is being used by at least one container
logrus.Warnf("volume %s is being used by the following container(s): %s", v.Name(), depsStr)
// Remove the container dependencies so we can go ahead and delete the volume

// We need to remove all containers using the volume
for _, dep := range deps {
if err := r.state.RemoveVolCtrDep(v, dep); err != nil {
return errors.Wrapf(err, "unable to remove container dependency %q from volume %q while trying to delete volume by force", dep, v.Name())
ctr, err := r.state.Container(dep)
if err != nil {
// If the container's removed, no point in
// erroring.
if errors.Cause(err) == ErrNoSuchCtr || errors.Cause(err) == ErrCtrRemoved {
continue
}

return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name())
}

// TODO: do we want to set force here when removing
// containers?
// I'm inclined to say no, in case someone accidentally
// wipes a container they're using...
if err := r.removeContainer(ctx, ctr, false, false); err != nil {
return errors.Wrapf(err, "error removing container %s that depends on volume %s", ctr.ID(), v.Name())
}
}
}
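The force path above is reached through the runtime's volume removal API; a small hedged sketch of a caller forcing removal of an in-use volume (assumes a *libpod.Runtime r and an existing volume):

    vol, err := r.GetVolume("myvol")
    if err != nil {
        return err
    }
    // With force set, containers still using the volume are removed first,
    // mirroring the loop in removeVolume above.
    if err := r.RemoveVolume(ctx, vol, true); err != nil {
        return err
    }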
@@ -192,10 +192,6 @@ type State interface {
// AddVolume adds the specified volume to state. The volume's name
// must be unique within the list of existing volumes
AddVolume(volume *Volume) error
// RemoveVolCtrDep updates the list of container dependencies that the
// volume has. It either deletes the dependent container ID from
// the sub-bucket
RemoveVolCtrDep(volume *Volume, ctrID string) error
// RemoveVolume removes the specified volume.
// Only volumes that have no container dependencies can be removed
RemoveVolume(volume *Volume) error
@@ -1,7 +1,6 @@
package createconfig

import (
"encoding/json"
"fmt"
"net"
"os"

@@ -23,18 +22,16 @@ import (
"golang.org/x/sys/unix"
)

type mountType string

// Type constants
const (
bps = iota
iops
// TypeBind is the type for mounting host dir
TypeBind mountType = "bind"
TypeBind = "bind"
// TypeVolume is the type for remote storage volumes
// TypeVolume mountType = "volume" // re-enable upon use
// TypeVolume = "volume" // re-enable upon use
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs mountType = "tmpfs"
TypeTmpfs = "tmpfs"
)

// CreateResourceConfig represents resource elements in CreateConfig

@@ -130,15 +127,15 @@ type CreateConfig struct {
Mounts []spec.Mount //mounts
Volumes []string //volume
VolumesFrom []string
WorkDir string //workdir
LabelOpts []string //SecurityOpts
NoNewPrivs bool //SecurityOpts
ApparmorProfile string //SecurityOpts
SeccompProfilePath string //SecurityOpts
NamedVolumes []*libpod.ContainerNamedVolume // Filled in by CreateConfigToOCISpec
WorkDir string //workdir
LabelOpts []string //SecurityOpts
NoNewPrivs bool //SecurityOpts
ApparmorProfile string //SecurityOpts
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
LocalVolumes []spec.Mount //Keeps track of the built-in volumes of container used in the --volumes-from flag
Syslog bool // Whether to enable syslog on exit commands
Syslog bool // Whether to enable syslog on exit commands
}

func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }

@@ -172,9 +169,9 @@ func (c *CreateConfig) AddContainerInitBinary(path string) error {
c.Command = append([]string{"/dev/init", "--"}, c.Command...)
c.Mounts = append(c.Mounts, spec.Mount{
Destination: "/dev/init",
Type: "bind",
Type: TypeBind,
Source: path,
Options: []string{"bind", "ro"},
Options: []string{TypeBind, "ro"},
})
return nil
}

@@ -217,9 +214,9 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
return mounts
}

//GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
// GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
m := c.LocalVolumes
m := []spec.Mount{}
for _, i := range c.Volumes {
var options []string
spliti := strings.Split(i, ":")

@@ -255,9 +252,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
mount.Source = "tmpfs"
mount.Options = append(mount.Options, "tmpcopyup")
} else {
// TODO: Move support for this and tmpfs into libpod
// Should tmpfs also be handled as named volumes? Wouldn't be hard
// This will cause a new local Volume to be created on your system
mount.Source = stringid.GenerateNonCryptoID()
mount.Options = append(mount.Options, "bind")
mount.Options = append(mount.Options, TypeBind)
}
m = append(m, mount)
}

@@ -268,13 +267,12 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
// GetVolumesFrom reads the create-config artifact of the container to get volumes from
// and adds it to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
var options string

if os.Geteuid() != 0 {
return nil
}

for _, vol := range c.VolumesFrom {
options := ""
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
options = splitVol[1]

@@ -283,41 +281,60 @@ func (c *CreateConfig) GetVolumesFrom() error {
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
inspect, err := ctr.Inspect(false)
if err != nil {
return errors.Wrapf(err, "error inspecting %q", splitVol[0])

logrus.Debugf("Adding volumes from container %s", ctr.ID())

// Look up the container's user volumes. This gets us the
// destinations of all mounts the user added to the container.
userVolumesArr := ctr.UserVolumes()

// We're going to need to access them a lot, so convert to a map
// to reduce looping.
// We'll also use the map to indicate if we missed any volumes along the way.
userVolumes := make(map[string]bool)
for _, dest := range userVolumesArr {
userVolumes[dest] = false
}
var createArtifact CreateConfig
artifact, err := ctr.GetArtifact("create-config")
if err != nil {
return errors.Wrapf(err, "error getting create-config artifact for %q", splitVol[0])

// Now we get the container's spec and loop through its volumes
// and append them in if we can find them.
spec := ctr.Spec()
if spec == nil {
return errors.Errorf("error retrieving container %s spec", ctr.ID())
}
if err := json.Unmarshal(artifact, &createArtifact); err != nil {
return err
}
for key := range createArtifact.BuiltinImgVolumes {
for _, m := range inspect.Mounts {
if m.Destination == key {
c.LocalVolumes = append(c.LocalVolumes, m)
break
for _, mnt := range spec.Mounts {
if mnt.Type != TypeBind {
continue
}
if _, exists := userVolumes[mnt.Destination]; exists {
userVolumes[mnt.Destination] = true
localOptions := options
if localOptions == "" {
localOptions = strings.Join(mnt.Options, ",")
}
c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", mnt.Source, mnt.Destination, localOptions))
}
}

for _, i := range createArtifact.Volumes {
// Volumes format is host-dir:ctr-dir[:options], so get the host and ctr dir
// and add on the options given by the user to the flag.
spliti := strings.SplitN(i, ":", 3)
// Throw error if mounting volume from container with Z option (private label)
// Override this by adding 'z' to options.
if len(spliti) > 2 && strings.Contains(spliti[2], "Z") && !strings.Contains(options, "z") {
return errors.Errorf("volume mounted with private option 'Z' in %q. Use option 'z' to mount in current container", ctr.ID())
// We're done with the spec mounts. Add named volumes.
// Add these unconditionally - none of them are automatically
// part of the container, as some spec mounts are.
namedVolumes := ctr.NamedVolumes()
for _, namedVol := range namedVolumes {
if _, exists := userVolumes[namedVol.Dest]; exists {
userVolumes[namedVol.Dest] = true
}
if options == "" {
// Mount the volumes with the default options
c.Volumes = append(c.Volumes, createArtifact.Volumes...)
} else {
c.Volumes = append(c.Volumes, spliti[0]+":"+spliti[1]+":"+options)
localOptions := options
if localOptions == "" {
localOptions = strings.Join(namedVol.Options, ",")
}
c.Volumes = append(c.Volumes, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, localOptions))
}

// Check if we missed any volumes
for volDest, found := range userVolumes {
if !found {
logrus.Warnf("Unable to match volume %s from container %s for volumes-from", volDest, ctr.ID())
}
}
}
@@ -417,14 +434,20 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
// others, if they are included
volumes := make([]string, 0, len(c.Volumes))
for _, vol := range c.Volumes {
volumes = append(volumes, strings.SplitN(vol, ":", 2)[0])
// We always want the volume destination
splitVol := strings.SplitN(vol, ":", 3)
if len(splitVol) > 1 {
volumes = append(volumes, splitVol[1])
} else {
volumes = append(volumes, splitVol[0])
}
}

options = append(options, libpod.WithUserVolumes(volumes))
}

if len(c.LocalVolumes) != 0 {
options = append(options, libpod.WithLocalVolumes(c.LocalVolumes))
if len(c.NamedVolumes) != 0 {
options = append(options, libpod.WithNamedVolumes(c.NamedVolumes))
}

if len(c.Command) != 0 {

@@ -538,7 +561,7 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l

options = append(options, libpod.WithPrivileged(c.Privileged))

useImageVolumes := c.ImageVolumeType == "bind"
useImageVolumes := c.ImageVolumeType == TypeBind
// Gather up the options for NewContainer which consist of With... funcs
options = append(options, libpod.WithRootFSFromImage(c.ImageID, c.Image, useImageVolumes))
options = append(options, libpod.WithSecLabels(c.LabelOpts))
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"

"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/mount"
pmount "github.com/containers/storage/pkg/mount"

@@ -48,6 +49,33 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
return configMount
}

// Split named volumes from normal volumes
func splitNamedVolumes(mounts []spec.Mount) ([]spec.Mount, []*libpod.ContainerNamedVolume) {
newMounts := make([]spec.Mount, 0)
namedVolumes := make([]*libpod.ContainerNamedVolume, 0)
for _, mount := range mounts {
// If it's not a named volume, append unconditionally
if mount.Type != TypeBind {
newMounts = append(newMounts, mount)
continue
}
// Volumes that are not named volumes must be an absolute or
// relative path.
// Volume names may not begin with a non-alphanumeric character
// so the HasPrefix() check is safe here.
if strings.HasPrefix(mount.Source, "/") || strings.HasPrefix(mount.Source, ".") {
newMounts = append(newMounts, mount)
} else {
namedVolume := new(libpod.ContainerNamedVolume)
namedVolume.Name = mount.Source
namedVolume.Dest = mount.Destination
namedVolume.Options = mount.Options
namedVolumes = append(namedVolumes, namedVolume)
}
}
return newMounts, namedVolumes
}
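A short sketch of what this classification does with typical --volume inputs (the paths and names are illustrative); it can be exercised directly since splitNamedVolumes lives in the same package:

    mounts := []spec.Mount{
        // A host path stays a plain bind mount.
        {Type: TypeBind, Source: "/srv/www", Destination: "/var/www", Options: []string{"ro"}},
        // A bare name is split out as a named volume.
        {Type: TypeBind, Source: "myvol", Destination: "/data", Options: []string{"rw"}},
    }
    binds, named := splitNamedVolumes(mounts)
    // binds keeps only the /srv/www mount; named holds one
    // *libpod.ContainerNamedVolume with Name "myvol" and Dest "/data".
    fmt.Println(len(binds), len(named))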
func getAvailableGids() (int64, error) {
idMap, err := user.ParseIDMapFile("/proc/self/gid_map")
if err != nil {

@@ -99,7 +127,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
}
sysMnt := spec.Mount{
Destination: "/sys",
Type: "bind",
Type: TypeBind,
Source: "/sys",
Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
}

@@ -126,7 +154,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/dev/mqueue")
devMqueue := spec.Mount{
Destination: "/dev/mqueue",
Type: "bind",
Type: TypeBind,
Source: "/dev/mqueue",
Options: []string{"bind", "nosuid", "noexec", "nodev"},
}

@@ -136,7 +164,7 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
Type: "bind",
Type: TypeBind,
Source: "/proc",
Options: []string{"rbind", "nosuid", "noexec", "nodev"},
}

@@ -377,6 +405,12 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
configSpec.Mounts = supercedeUserMounts(volumeMounts, configSpec.Mounts)
//--mount
configSpec.Mounts = supercedeUserMounts(config.initFSMounts(), configSpec.Mounts)

// Split normal mounts and named volumes
newMounts, namedVolumes := splitNamedVolumes(configSpec.Mounts)
configSpec.Mounts = newMounts
config.NamedVolumes = namedVolumes

// BLOCK IO
blkio, err := config.CreateBlockIO()
if err != nil {
@ -144,7 +144,7 @@ var _ = Describe("Podman commit", func() {
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect.ExitCode()).To(Equal(0))
|
||||
image := inspect.InspectImageJSON()
|
||||
_, ok := image[0].Config.Volumes["/tmp"]
|
||||
_, ok := image[0].Config.Volumes["/foo"]
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
r := podmanTest.Podman([]string{"run", "newimage"})
|
||||
|
@ -611,7 +611,6 @@ USER mail`
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session.ExitCode()).To(Equal(0))
|
||||
Expect(session.OutputToString()).To(ContainSubstring("data"))
|
||||
|
||||
})
|
||||
|
||||
It("podman run --volumes flag with multiple volumes", func() {
|
||||
|
@ -32,7 +32,7 @@ var _ = Describe("Podman volume rm", func() {
|
||||
|
||||
})
|
||||
|
||||
It("podman rm volume", func() {
|
||||
It("podman volume rm", func() {
|
||||
session := podmanTest.Podman([]string{"volume", "create", "myvol"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session.ExitCode()).To(Equal(0))
|
||||
@ -47,7 +47,7 @@ var _ = Describe("Podman volume rm", func() {
|
||||
Expect(len(session.OutputToStringArray())).To(Equal(0))
|
||||
})
|
||||
|
||||
It("podman rm with --force flag", func() {
|
||||
It("podman volume rm with --force flag", func() {
|
||||
SkipIfRemote()
|
||||
session := podmanTest.Podman([]string{"create", "-v", "myvol:/myvol", ALPINE, "ls"})
|
||||
cid := session.OutputToString()
|
||||
|