Merge pull request #2229 from rhatdan/volumes

Fix volume handling in podman
Merged by OpenShift Merge Robot (committed via GitHub) on 2019-02-14 21:45:58 +01:00
25 changed files with 132 additions and 172 deletions

API.md
View File

@@ -97,7 +97,7 @@ in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in
 [func ReceiveFile(path: string, delete: bool) int](#ReceiveFile)
-[func RemoveContainer(name: string, force: bool) string](#RemoveContainer)
+[func RemoveContainer(name: string, force: bool, removeVolumes: bool) string](#RemoveContainer)
 [func RemoveImage(name: string, force: bool) string](#RemoveImage)
@@ -777,9 +777,9 @@ method ReceiveFile(path: [string](https://godoc.org/builtin#string), delete: [bo
 ### <a name="RemoveContainer"></a>func RemoveContainer
 <div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
-method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
+method RemoveContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool), removeVolumes: [bool](https://godoc.org/builtin#bool)) [string](https://godoc.org/builtin#string)</div>
 RemoveContainer requires the name or ID of a container as well as a boolean representing whether a running
-container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
+container can be stopped and removed. It also takes a flag indicating whether to remove builtin volumes. Upon successful removal of the container, its ID is returned. If the
 container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
#### Example
~~~
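# Hypothetical invocation showing the new removeVolumes parameter
# (parameter values invented for illustration):
$ varlink call -m unix:/run/podman/io.podman/io.podman.RemoveContainer '{"name": "62f4fd98cb57", "force": false, "removeVolumes": false}'
{
  "container": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
}
~~~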

View File

@@ -58,7 +58,7 @@ func cleanupCmd(c *cliconfig.CleanupValues) error {
 	for _, ctr := range cleanupContainers {
 		hadError := false
 		if c.Remove {
-			if err := runtime.RemoveContainer(ctx, ctr, false); err != nil {
+			if err := runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
 				if lastError != nil {
 					fmt.Fprintln(os.Stderr, lastError)
 				}

View File

@@ -135,6 +135,11 @@ type PruneImagesValues struct {
 	All bool
 }
 
+type PruneContainersValues struct {
+	PodmanCommand
+	Force bool
+}
+
 type ImportValues struct {
 	PodmanCommand
 	Change []string

View File

@@ -13,13 +13,12 @@ import (
 )
 
 var (
-	pruneContainersCommand cliconfig.ContainersPrune
+	pruneContainersCommand cliconfig.PruneContainersValues
 	pruneContainersDescription = `
 podman container prune
 Removes all exited containers
 `
 	_pruneContainersCommand = &cobra.Command{
 		Use:   "prune",
 		Short: "Remove all stopped containers",
@@ -35,9 +34,11 @@ var (
 func init() {
 	pruneContainersCommand.Command = _pruneContainersCommand
 	pruneContainersCommand.SetUsageTemplate(UsageTemplate())
+	flags := pruneContainersCommand.Flags()
+	flags.BoolVarP(&pruneContainersCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
 }
 
-func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force bool) error {
+func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWorkers int, force, volumes bool) error {
 	var deleteFuncs []shared.ParallelWorkerInput
 	filter := func(c *libpod.Container) bool {
@@ -57,7 +58,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
 	for _, container := range delContainers {
 		con := container
 		f := func() error {
-			return runtime.RemoveContainer(ctx, con, force)
+			return runtime.RemoveContainer(ctx, con, force, volumes)
 		}
 		deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
@@ -70,7 +71,7 @@ func pruneContainers(runtime *adapter.LocalRuntime, ctx context.Context, maxWork
 	return printParallelOutput(deleteErrors, errCount)
 }
 
-func pruneContainersCmd(c *cliconfig.ContainersPrune) error {
+func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
 	runtime, err := adapter.GetRuntime(&c.PodmanCommand)
 	if err != nil {
 		return errors.Wrapf(err, "could not get runtime")
@@ -83,5 +84,5 @@ func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
 	}
 	logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-	return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"))
+	return pruneContainers(runtime, getContext(), maxWorkers, c.Bool("force"), c.Bool("volumes"))
 }
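The prune path now threads both booleans through to RemoveContainer. A hypothetical invocation of the command this wires up:
~~~
# Remove all stopped containers; -f/--force skips the running-container check.
$ podman container prune -f
~~~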

View File

@@ -646,9 +646,10 @@ func parseCreateOpts(ctx context.Context, c *cliconfig.PodmanCommand, runtime *l
 	}
 
 	var ImageVolumes map[string]struct{}
-	if data != nil {
+	if data != nil && c.String("image-volume") != "ignore" {
 		ImageVolumes = data.Config.Volumes
 	}
+
 	var imageVolType = map[string]string{
 		"bind":  "",
 		"tmpfs": "",

View File

@@ -39,8 +39,7 @@ func init() {
 	flags.BoolVarP(&rmCommand.All, "all", "a", false, "Remove all containers")
 	flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running container. The default is false")
 	flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
-	flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container (Not implemented yet)")
+	flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container")
 }
 
 // saveCmd saves the image to either docker-archive or oci
@@ -79,7 +78,7 @@ func rmCmd(c *cliconfig.RmValues) error {
 	for _, container := range delContainers {
 		con := container
 		f := func() error {
-			return runtime.RemoveContainer(ctx, con, c.Force)
+			return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
 		}
 		deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
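With the flag implemented, removal of a container's named volumes becomes opt-in. A hypothetical invocation (container name invented for illustration):
~~~
# Remove the container and any named volumes used only by it.
$ podman rm --volumes mycontainer
~~~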

View File

@@ -132,7 +132,7 @@ func runCmd(c *cliconfig.RunValues) error {
 			exitCode = 126
 		}
 		if c.IsSet("rm") {
-			if deleteError := runtime.RemoveContainer(ctx, ctr, true); deleteError != nil {
+			if deleteError := runtime.RemoveContainer(ctx, ctr, true, false); deleteError != nil {
 				logrus.Errorf("unable to remove container %s after failing to start and attach to it", ctr.ID())
 			}
 		}

View File

@@ -144,7 +144,7 @@ func startCmd(c *cliconfig.StartValues) error {
 			logrus.Errorf("unable to detect if container %s should be deleted", ctr.ID())
 		}
 		if createArtifact.Rm {
-			if rmErr := runtime.RemoveContainer(ctx, ctr, true); rmErr != nil {
+			if rmErr := runtime.RemoveContainer(ctx, ctr, true, false); rmErr != nil {
 				logrus.Errorf("unable to remove container %s after it failed to start", ctr.ID())
 			}
 		}

View File

@@ -76,7 +76,7 @@ Are you sure you want to continue? [y/N] `, volumeString)
 	ctx := getContext()
 	fmt.Println("Deleted Containers")
-	lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false)
+	lasterr := pruneContainers(runtime, ctx, shared.Parallelize("rm"), false, false)
 	if c.Bool("volumes") {
 		fmt.Println("Deleted Volumes")
 		err := volumePrune(runtime, getContext())

View File

@@ -600,7 +600,7 @@ method GetAttachSockets(name: string) -> (sockets: Sockets)
 # a [ContainerNotFound](#ContainerNotFound) error is returned.
 method WaitContainer(name: string) -> (exitcode: int)
 
-# RemoveContainer requires the name or ID of a container as well as a boolean representing whether a running
+# RemoveContainer requires the name or ID of a container as well as a boolean representing whether a running and a boolean indicating whether to remove builtin volumes
 # container can be stopped and removed. Upon successful removal of the container, its ID is returned. If the
 # container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.
 # #### Example
@@ -610,7 +610,7 @@ method WaitContainer(name: string) -> (exitcode: int)
 #   "container": "62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20"
 # }
 # ~~~
-method RemoveContainer(name: string, force: bool) -> (container: string)
+method RemoveContainer(name: string, force: bool, removeVolumes: bool) -> (container: string)
 
 # DeleteStoppedContainers will delete all containers that are not running. It will return a list of the deleted
 # container IDs. See also [RemoveContainer](RemoveContainer).

View File

@@ -73,6 +73,7 @@
 | [podman-unpause(1)](/docs/podman-unpause.1.md) | Unpause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)|
 | [podman-varlink(1)](/docs/podman-varlink.1.md) | Run the varlink backend ||
 | [podman-version(1)](/docs/podman-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)|
+| [podman-volume(1)](/docs/podman-volume.1.md) | Manage Volumes ||
 | [podman-volume-create(1)](/docs/podman-volume-create.1.md) | Create a volume ||
 | [podman-volume-inspect(1)](/docs/podman-volume-inspect.1.md) | Get detailed information on one or more volumes ||
 | [podman-volume-ls(1)](/docs/podman-volume-ls.1.md) | List all the available volumes ||

View File

@@ -218,7 +218,7 @@ func runSingleThreadedStressTest(ctx context.Context, client *libpod.Runtime, im
 	//Delete Container
 	deleteStartTime := time.Now()
-	err = client.RemoveContainer(ctx, ctr, true)
+	err = client.RemoveContainer(ctx, ctr, true, false)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -168,6 +168,7 @@ the exit codes follow the `chroot` standard, see below:
 | [podman-umount(1)](podman-umount.1.md) | Unmount a working container's root filesystem. |
 | [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. |
 | [podman-version(1)](podman-version.1.md) | Display the Podman version information. |
+| [podman-volume(1)](podman-volume.1.md) | Manage Volumes. |
 | [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. |
 
 ## FILES

View File

@@ -544,7 +544,7 @@ func (r *LocalRuntime) GetContainers(filters ...libpod.ContainerFilter) ([]*libp
 // RemoveContainer removes the given container
 // If force is specified, the container will be stopped first
 // Otherwise, RemoveContainer will return an error if the container is running
-func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force bool) error {
+func (r *LocalRuntime) RemoveContainer(ctx context.Context, c *libpod.Container, force, volumes bool) error {
 	return libpod.ErrNotImplemented
 }

View File

@@ -358,8 +358,7 @@ type ContainerConfig struct {
 	ExitCommand []string `json:"exitCommand,omitempty"`
 	// LocalVolumes are the built-in volumes we get from the --volumes-from flag
 	// It picks up the built-in volumes of the container used by --volumes-from
-	LocalVolumes []string
+	LocalVolumes []spec.Mount
 	// IsInfra is a bool indicating whether this container is an infra container used for
 	// sharing kernel namespaces in a pod
 	IsInfra bool `json:"pause"`
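The switch from []string to []spec.Mount means LocalVolumes now records the full OCI mount description rather than just a destination path. A sketch of what one stored entry might look like (all values hypothetical):
~~~
mount := spec.Mount{
	Destination: "/var/lib/data", // mount point inside the container
	Type:        "bind",
	Source:      "/var/lib/containers/storage/volumes/myvol/_data", // host path of the named volume
	Options:     []string{"rprivate", "bind", "rw"},
}
~~~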

View File

@@ -10,21 +10,16 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 
-	"github.com/containers/buildah/imagebuildah"
 	"github.com/containers/libpod/pkg/ctime"
 	"github.com/containers/libpod/pkg/hooks"
 	"github.com/containers/libpod/pkg/hooks/exec"
 	"github.com/containers/libpod/pkg/rootless"
 	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/mount"
-	"github.com/opencontainers/runc/libcontainer/user"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/opencontainers/runtime-tools/generate"
-	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -1053,113 +1048,6 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
 	return filepath.Join(c.state.DestinationRunDir, destFile), nil
 }
 
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
-	var uid, gid int
-	mountPoint := c.state.Mountpoint
-	if !c.state.Mounted {
-		return errors.Wrapf(ErrInternal, "container is not mounted")
-	}
-	newImage, err := c.runtime.imageRuntime.NewFromLocal(c.config.RootfsImageID)
-	if err != nil {
-		return err
-	}
-	imageData, err := newImage.Inspect(ctx)
-	if err != nil {
-		return err
-	}
-	// Add the built-in volumes of the container passed in to --volumes-from
-	for _, vol := range c.config.LocalVolumes {
-		if imageData.Config.Volumes == nil {
-			imageData.Config.Volumes = map[string]struct{}{
-				vol: {},
-			}
-		} else {
-			imageData.Config.Volumes[vol] = struct{}{}
-		}
-	}
-	if c.config.User != "" {
-		if execUser == nil {
-			return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
-		}
-		uid = execUser.Uid
-		gid = execUser.Gid
-	}
-	for k := range imageData.Config.Volumes {
-		mount := spec.Mount{
-			Destination: k,
-			Type:        "bind",
-			Options:     []string{"private", "bind", "rw"},
-		}
-		if MountExists(g.Mounts(), k) {
-			continue
-		}
-		volumePath := filepath.Join(c.config.StaticDir, "volumes", k)
-		// Ensure the symlinks are resolved
-		resolvedSymlink, err := imagebuildah.ResolveSymLink(mountPoint, k)
-		if err != nil {
-			return errors.Wrapf(ErrCtrStateInvalid, "cannot resolve %s in %s for container %s", k, mountPoint, c.ID())
-		}
-		var srcPath string
-		if resolvedSymlink != "" {
-			srcPath = filepath.Join(mountPoint, resolvedSymlink)
-		} else {
-			srcPath = filepath.Join(mountPoint, k)
-		}
-		if _, err := os.Stat(srcPath); os.IsNotExist(err) {
-			logrus.Infof("Volume image mount point %s does not exist in root FS, need to create it", k)
-			if err = os.MkdirAll(srcPath, 0755); err != nil {
-				return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
-			}
-			if err = os.Chown(srcPath, uid, gid); err != nil {
-				return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", srcPath, k, c.ID())
-			}
-		}
-		if _, err := os.Stat(volumePath); os.IsNotExist(err) {
-			if err = os.MkdirAll(volumePath, 0755); err != nil {
-				return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, k, c.ID())
-			}
-			if err = os.Chown(volumePath, uid, gid); err != nil {
-				return errors.Wrapf(err, "error chowning directory %q for volume %q in container %q", volumePath, k, c.ID())
-			}
-			if err = label.Relabel(volumePath, c.config.MountLabel, false); err != nil {
-				return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, k, c.ID())
-			}
-			if err = chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
-				return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, k, c.ID(), srcPath)
-			}
-			// Set the volume path with the same owner and permission of source path
-			sstat, _ := os.Stat(srcPath)
-			st, ok := sstat.Sys().(*syscall.Stat_t)
-			if !ok {
-				return fmt.Errorf("could not convert to syscall.Stat_t")
-			}
-			uid := int(st.Uid)
-			gid := int(st.Gid)
-			if err := os.Lchown(volumePath, uid, gid); err != nil {
-				return err
-			}
-			if os.Chmod(volumePath, sstat.Mode()); err != nil {
-				return err
-			}
-		}
-		mount.Source = volumePath
-		g.AddMount(mount)
-	}
-	return nil
-}
-
 // Save OCI spec to disk, replacing any existing specs for the container
 func (c *Container) saveSpec(spec *spec.Spec) error {
 	// If the OCI spec already exists, we need to replace it
@@ -1303,3 +1191,30 @@ func getExcludedCGroups() (excludes []string) {
 	excludes = []string{"rdma"}
 	return
 }
+
+// namedVolumes returns named volumes for the container
+func (c *Container) namedVolumes() ([]string, error) {
+	var volumes []string
+	for _, vol := range c.config.Spec.Mounts {
+		if strings.HasPrefix(vol.Source, c.runtime.config.VolumePath) {
+			volume := strings.TrimPrefix(vol.Source, c.runtime.config.VolumePath+"/")
+			split := strings.Split(volume, "/")
+			volume = split[0]
+			if _, err := c.runtime.state.Volume(volume); err == nil {
+				volumes = append(volumes, volume)
+			}
+		}
+	}
+	return volumes, nil
+}
+
+// this should be from chrootarchive.
+func (c *Container) copyWithTarFromImage(src, dest string) error {
+	mountpoint, err := c.mount()
+	if err != nil {
+		return err
+	}
+	a := archive.NewDefaultArchiver()
+	source := filepath.Join(mountpoint, src)
+	return a.CopyWithTar(source, dest)
+}
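The prefix matching in namedVolumes is what lets removal find a container's named volumes: a mount counts only if its source sits under the runtime's VolumePath and its first path element is a volume known to the state. A self-contained sketch of that string handling, assuming the default rootful VolumePath (paths hypothetical):
~~~
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed default: GraphRoot/volumes, see the pkg/util change below.
	volumePath := "/var/lib/containers/storage/volumes"
	source := "/var/lib/containers/storage/volumes/myvol/_data"
	if strings.HasPrefix(source, volumePath) {
		// The first path element after VolumePath is the volume name.
		trimmed := strings.TrimPrefix(source, volumePath+"/")
		fmt.Println(strings.Split(trimmed, "/")[0]) // prints "myvol"
	}
}
~~~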

View File

@@ -235,13 +235,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
 		}
 	}
 
-	// Bind builtin image volumes
-	if c.config.Rootfs == "" && c.config.ImageVolumes {
-		if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
-			return nil, errors.Wrapf(err, "error mounting image volumes")
-		}
-	}
-
 	if c.config.User != "" {
 		// User and Group must go together
 		g.SetProcessUID(uint32(execUser.Uid))

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/cri-o/ocicni/pkg/ocicni"
+	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
@@ -1058,7 +1059,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
 // from a container passed in to the --volumes-from flag.
 // This stores the built-in volume information in the Config so we can
 // add them when creating the container.
-func WithLocalVolumes(volumes []string) CtrCreateOption {
+func WithLocalVolumes(volumes []spec.Mount) CtrCreateOption {
 	return func(ctr *Container) error {
 		if ctr.valid {
 			return ErrCtrFinalized

View File

@@ -177,9 +177,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
 		if err != nil {
 			newVol, err := r.newVolume(ctx, WithVolumeName(vol.Source))
 			if err != nil {
-				logrus.Errorf("error creating named volume %q: %v", vol.Source, err)
+				return nil, errors.Wrapf(err, "error creating named volume %q", vol.Source)
 			}
 			ctr.config.Spec.Mounts[i].Source = newVol.MountPoint()
+			if err := ctr.copyWithTarFromImage(ctr.config.Spec.Mounts[i].Destination, ctr.config.Spec.Mounts[i].Source); err != nil && !os.IsNotExist(err) {
+				return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Source)
+			}
 			continue
 		}
 		ctr.config.Spec.Mounts[i].Source = volInfo.MountPoint()
@@ -225,17 +228,19 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
 // RemoveContainer removes the given container
 // If force is specified, the container will be stopped first
+// If removeVolume is specified, named volumes used by the container will
+// be removed also if and only if the container is the sole user
 // Otherwise, RemoveContainer will return an error if the container is running
-func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
 	r.lock.Lock()
 	defer r.lock.Unlock()
-	return r.removeContainer(ctx, c, force)
+	return r.removeContainer(ctx, c, force, removeVolume)
 }
 
 // Internal function to remove a container
 // Locks the container, but does not lock the runtime
-func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool) error {
+func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
 	if !c.valid {
 		if ok, _ := r.state.HasContainer(c.ID()); !ok {
 			// Container probably already removed
@@ -248,6 +253,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
 	// To avoid races around removing a container and the pod it is in
 	var pod *Pod
 	var err error
+	runtime := c.runtime
 	if c.config.Pod != "" {
 		pod, err = r.state.Pod(c.config.Pod)
 		if err != nil {
@@ -333,6 +339,13 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
 		return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
 	}
 
+	var volumes []string
+	if removeVolume {
+		volumes, err = c.namedVolumes()
+		if err != nil {
+			logrus.Errorf("unable to retrieve builtin volumes for container %v: %v", c.ID(), err)
+		}
+	}
 	var cleanupErr error
 	// Remove the container from the state
 	if c.config.Pod != "" {
@@ -397,6 +410,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
 		}
 	}
 
+	for _, v := range volumes {
+		if volume, err := runtime.state.Volume(v); err == nil {
+			if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+				logrus.Errorf("cleanup volume (%s): %v", v, err)
+			}
+		}
+	}
+
 	return cleanupErr
 }
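Callers opt in per removal. A minimal sketch of driving the new signature from Go (assumes a default-configured libpod runtime and an existing container; the name is hypothetical, error handling abbreviated):
~~~
// force=false: refuse to remove a running container.
// removeVolume=true: also remove named volumes used solely by this container.
rt, err := libpod.NewRuntime()
if err != nil {
	logrus.Fatal(err)
}
ctr, err := rt.LookupContainer("mycontainer")
if err != nil {
	logrus.Fatal(err)
}
if err := rt.RemoveContainer(context.Background(), ctr, false, true); err != nil {
	logrus.Error(err)
}
~~~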

View File

@@ -43,7 +43,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool)
 	if len(imageCtrs) > 0 && len(img.Names()) <= 1 {
 		if force {
 			for _, ctr := range imageCtrs {
-				if err := r.removeContainer(ctx, ctr, true); err != nil {
+				if err := r.removeContainer(ctx, ctr, true, false); err != nil {
 					return "", errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", img.ID(), ctr.ID())
 				}
 			}

View File

@@ -5,10 +5,6 @@ import (
 	"path/filepath"
 )
 
-// VolumePath is the path under which all volumes that are created using the
-// local driver will be created
-// const VolumePath = "/var/lib/containers/storage/volumes"
-
 // Creates a new volume
 func newVolume(runtime *Runtime) (*Volume, error) {
 	volume := new(Volume)

View File

@@ -11,7 +11,9 @@ import (
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/pkg/namespaces"
 	"github.com/containers/libpod/pkg/rootless"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/stringid"
 	"github.com/cri-o/ocicni/pkg/ocicni"
 	"github.com/docker/go-connections/nat"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -133,8 +135,8 @@ type CreateConfig struct {
 	SeccompProfilePath string //SecurityOpts
 	SecurityOpts       []string
 	Rootfs             string
-	LocalVolumes       []string //Keeps track of the built-in volumes of container used in the --volumes-from flag
-	Syslog             bool     // Whether to enable syslog on exit commands
+	LocalVolumes       []spec.Mount //Keeps track of the built-in volumes of container used in the --volumes-from flag
+	Syslog             bool         // Whether to enable syslog on exit commands
 }
 
 func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -215,7 +217,7 @@ func (c *CreateConfig) initFSMounts() []spec.Mount {
 //GetVolumeMounts takes user provided input for bind mounts and creates Mount structs
 func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, error) {
-	var m []spec.Mount
+	m := c.LocalVolumes
 	for _, i := range c.Volumes {
 		var options []string
 		spliti := strings.Split(i, ":")
@@ -233,22 +235,31 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
 		logrus.Debugf("User mount %s:%s options %v", spliti[0], spliti[1], options)
 	}
 
 	// volumes from image config
-	if c.ImageVolumeType != "tmpfs" {
+	if c.ImageVolumeType == "ignore" {
 		return m, nil
 	}
 	for vol := range c.BuiltinImgVolumes {
 		if libpod.MountExists(specMounts, vol) {
 			continue
 		}
 		mount := spec.Mount{
 			Destination: vol,
-			Type:        string(TypeTmpfs),
-			Source:      string(TypeTmpfs),
-			Options:     []string{"rprivate", "rw", "noexec", "nosuid", "nodev", "tmpcopyup"},
+			Type:        c.ImageVolumeType,
+			Options:     []string{"rprivate", "rw", "nodev"},
 		}
+		if c.ImageVolumeType == "tmpfs" {
+			mount.Source = "tmpfs"
+			mount.Options = append(mount.Options, "tmpcopyup")
+		} else {
+			// This will cause a new local Volume to be created on your system
+			mount.Source = stringid.GenerateNonCryptoID()
+			mount.Options = append(mount.Options, "bind")
+		}
 		m = append(m, mount)
 	}
 	return m, nil
 }
@@ -256,6 +267,11 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
 // and adds it to c.Volumes of the current container.
 func (c *CreateConfig) GetVolumesFrom() error {
 	var options string
+
+	if rootless.SkipStorageSetup() {
+		return nil
+	}
+
 	for _, vol := range c.VolumesFrom {
 		splitVol := strings.SplitN(vol, ":", 2)
 		if len(splitVol) == 2 {
@@ -265,6 +281,10 @@ func (c *CreateConfig) GetVolumesFrom() error {
 		if err != nil {
 			return errors.Wrapf(err, "error looking up container %q", splitVol[0])
 		}
+		inspect, err := ctr.Inspect(false)
+		if err != nil {
+			return errors.Wrapf(err, "error inspecting %q", splitVol[0])
+		}
 		var createArtifact CreateConfig
 		artifact, err := ctr.GetArtifact("create-config")
 		if err != nil {
@@ -273,9 +293,13 @@ func (c *CreateConfig) GetVolumesFrom() error {
 		if err := json.Unmarshal(artifact, &createArtifact); err != nil {
 			return err
 		}
 		for key := range createArtifact.BuiltinImgVolumes {
-			c.LocalVolumes = append(c.LocalVolumes, key)
+			for _, m := range inspect.Mounts {
+				if m.Destination == key {
+					c.LocalVolumes = append(c.LocalVolumes, m)
+					break
+				}
+			}
 		}
 		for _, i := range createArtifact.Volumes {
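Taken together: with --image-volume=bind, the image's declared volumes are materialized as named local volumes; tmpfs and ignore remain available; and --volumes-from now copies full mount structs instead of bare destination paths. Hypothetical invocations (container and image names invented):
~~~
# Back the image's declared volumes with named local volumes.
$ podman run --image-volume=bind --name=src fedora true
# Or keep the tmpfs behavior, or skip image volumes entirely.
$ podman run --image-volume=tmpfs fedora true
$ podman run --image-volume=ignore fedora true
# Reuse another container's built-in volumes.
$ podman run --volumes-from=src fedora true
~~~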

View File

@@ -259,8 +259,8 @@ func GetRootlessStorageOpts() (storage.StoreOptions, error) {
 	return opts, nil
 }
 
-// GetRootlessVolumeInfo returns where all the name volumes will be created in rootless mode
-func GetRootlessVolumeInfo() (string, error) {
+// GetRootlessVolumePath returns where all the name volumes will be created in rootless mode
+func GetRootlessVolumePath() (string, error) {
 	dataDir, _, err := GetRootlessDirInfo()
 	if err != nil {
 		return "", err
@@ -307,15 +307,13 @@ func GetDefaultStoreOptions() (storage.StoreOptions, string, error) {
 		err error
 	)
 	storageOpts := storage.DefaultStoreOptions
-	volumePath := "/var/lib/containers/storage"
+	volumePath := filepath.Join(storageOpts.GraphRoot, "volumes")
 	if rootless.IsRootless() {
 		storageOpts, err = GetRootlessStorageOpts()
 		if err != nil {
 			return storageOpts, volumePath, err
 		}
-		volumePath, err = GetRootlessVolumeInfo()
+		volumePath, err = GetRootlessVolumePath()
 		if err != nil {
 			return storageOpts, volumePath, err
 		}

View File

@@ -358,13 +358,13 @@ func (i *LibpodAPI) WaitContainer(call iopodman.VarlinkCall, name string) error
 }
 
 // RemoveContainer ...
-func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool) error {
+func (i *LibpodAPI) RemoveContainer(call iopodman.VarlinkCall, name string, force bool, removeVolumes bool) error {
 	ctx := getContext()
 	ctr, err := i.Runtime.LookupContainer(name)
 	if err != nil {
 		return call.ReplyContainerNotFound(name)
 	}
-	if err := i.Runtime.RemoveContainer(ctx, ctr, force); err != nil {
+	if err := i.Runtime.RemoveContainer(ctx, ctr, force, removeVolumes); err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
 	return call.ReplyRemoveContainer(ctr.ID())
@@ -385,7 +385,7 @@ func (i *LibpodAPI) DeleteStoppedContainers(call iopodman.VarlinkCall) error {
 			return call.ReplyErrorOccurred(err.Error())
 		}
 		if state != libpod.ContainerStateRunning {
-			if err := i.Runtime.RemoveContainer(ctx, ctr, false); err != nil {
+			if err := i.Runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
 				return call.ReplyErrorOccurred(err.Error())
 			}
 			deletedContainers = append(deletedContainers, ctr.ID())

View File

@@ -131,9 +131,14 @@ func varlinkCreateToCreateConfig(ctx context.Context, create iopodman.Create, ru
 	}
 	imageID := data.ID
 
+	var ImageVolumes map[string]struct{}
+	if data != nil && create.Image_volume_type != "ignore" {
+		ImageVolumes = data.Config.Volumes
+	}
+
 	config := &cc.CreateConfig{
 		Runtime:           runtime,
-		BuiltinImgVolumes: data.Config.Volumes,
+		BuiltinImgVolumes: ImageVolumes,
 		ConmonPidFile:     create.Conmon_pidfile,
 		ImageVolumeType:   create.Image_volume_type,
 		CapAdd:            create.Cap_add,