Add --volumes-from flag to podman run and create

podman now supports the --volumes-from flag, which allows users
to mount all of an existing container's volumes into a new container.
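
A minimal usage sketch (the container name, image, and paths below are illustrative):

```
$ podman create --name datactr -v /srv/appdata:/data docker.io/library/alpine:latest sh
$ podman run --rm --volumes-from datactr docker.io/library/alpine:latest ls /data
```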

Signed-off-by: umohnani8 <umohnani@redhat.com>

Closes: #931
Approved by: mheon
umohnani8
2018-06-11 15:27:42 -04:00
committed by Atomic Bot
parent c7424b6991
commit 4855998f1c
11 changed files with 224 additions and 14 deletions


@ -302,6 +302,10 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
return nil, err
}
if err = parseVolumesFrom(c.StringSlice("volumes-from")); err != nil {
return nil, err
}
tty := c.Bool("tty")
pidMode := container.PidMode(c.String("pid"))
@ -596,6 +600,7 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
Volumes: c.StringSlice("volume"),
WorkDir: workDir,
Rootfs: rootfs,
VolumesFrom: c.StringSlice("volumes-from"),
}
if !config.Privileged {


@ -75,9 +75,6 @@ func addWarning(warnings []string, msg string) []string {
}
func parseVolumes(volumes []string) error {
if len(volumes) == 0 {
return nil
}
for _, volume := range volumes {
arr := strings.SplitN(volume, ":", 3)
if len(arr) < 2 {
@ -98,6 +95,21 @@ func parseVolumes(volumes []string) error {
return nil
}
func parseVolumesFrom(volumesFrom []string) error {
for _, vol := range volumesFrom {
arr := strings.SplitN(vol, ":", 2)
if len(arr) == 2 {
if strings.Contains(arr[1], "Z") || strings.Contains(arr[1], "private") || strings.Contains(arr[1], "slave") || strings.Contains(arr[1], "shared") {
return errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z", arr[1])
}
if err := validateVolumeOpts(arr[1]); err != nil {
return err
}
}
}
return nil
}
func validateVolumeHostDir(hostDir string) error {
if !filepath.IsAbs(hostDir) {
return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
@ -121,20 +133,20 @@ func validateVolumeOpts(option string) error {
for _, opt := range options {
switch opt {
case "rw", "ro":
foundRWRO++
if foundRWRO > 1 {
return errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", option)
}
foundRWRO++
case "z", "Z":
foundLabelChange++
if foundLabelChange > 1 {
return errors.Errorf("invalid options %q, can only specify 1 'z' or 'Z' option", option)
}
foundLabelChange++
case "private", "rprivate", "shared", "rshared", "slave", "rslave":
foundRootPropagation++
if foundRootPropagation > 1 {
return errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private' or '[r]slave' option", option)
}
foundRootPropagation++
default:
return errors.Errorf("invalid option type %q", option)
}


@ -654,6 +654,40 @@ change propagation properties of source mount. Say `/` is source mount for
To disable automatic copying of data from the container path to the volume, use
the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes.
**--volumes-from**[=*CONTAINER*[:*OPTIONS*]]
Mount volumes from the specified container(s).
*OPTIONS* is a comma-delimited list with the following available elements:
* [rw|ro]
* z
Mounts volumes that have already been mounted in a source container onto another
container. You must supply the source container's ID or name.
To share a volume, use the --volumes-from option when running
the target container. You can share volumes even if the source container
is not running.
By default, podman mounts the volumes in the same mode (read-write or
read-only) as they are mounted in the source container. Optionally, you
can change this by suffixing the container-id with either the `ro` or
`rw` keyword.
Labeling systems like SELinux require that proper labels are placed on volume
content mounted into a container. Without a label, the security system might
prevent the processes running inside the container from using the content. By
default, podman does not change the labels set by the OS.
To change a label in the container context, you can add `z` to the volume mount.
This suffix tells podman to relabel file objects on the shared volumes. The `z`
option tells podman that two containers share the volume content. As a result,
podman labels the content with a shared content label. Shared volume labels allow
all containers to read/write content.
If the location of the volume from the source container overlaps with
data residing on a target container, then the volume hides
that data on the target.
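
As a sketch, sharing volumes at creation time might look like this (container names, image, and paths are illustrative):

```
$ podman create --name webdata -v /srv/web:/var/www docker.io/library/alpine:latest sh
$ podman create --volumes-from webdata --name consumer docker.io/library/alpine:latest ls /var/www
```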
**-w**, **--workdir**=""
Working directory inside the container


@ -686,6 +686,40 @@ change propagation properties of source mount. Say `/` is source mount for
To disable automatic copying of data from the container path to the volume, use
the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes.
**--volumes-from**[=*CONTAINER*[:*OPTIONS*]]
Mount volumes from the specified container(s).
*OPTIONS* is a comma-delimited list with the following available elements:
* [rw|ro]
* z
Mounts volumes that have already been mounted in a source container onto another
container. You must supply the source container's ID or name.
To share a volume, use the --volumes-from option when running
the target container. You can share volumes even if the source container
is not running.
By default, podman mounts the volumes in the same mode (read-write or
read-only) as they are mounted in the source container. Optionally, you
can change this by suffixing the container-id with either the `ro` or
`rw` keyword.
Labeling systems like SELinux require that proper labels are placed on volume
content mounted into a container. Without a label, the security system might
prevent the processes running inside the container from using the content. By
default, podman does not change the labels set by the OS.
To change a label in the container context, you can add `z` to the volume mount.
This suffix tells podman to relabel file objects on the shared volumes. The `z`
option tells podman that two containers share the volume content. As a result,
podman labels the content with a shared content label. Shared volume labels allow
all containers to read/write content.
If the location of the volume from the source container overlaps with
data residing on a target container, then the volume hides
that data on the target.
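
For example, a read-only, relabeled share from a source container might be requested like this (names and paths are illustrative):

```
$ podman create --name webdata -v /srv/web:/var/www docker.io/library/alpine:latest sh
$ podman run --rm --volumes-from webdata:ro,z docker.io/library/alpine:latest ls /var/www
```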
**-w**, **--workdir**=""
Working directory inside the container


@ -313,6 +313,9 @@ type ContainerConfig struct {
// ExitCommand is the container's exit command.
// This Command will be executed when the container exits
ExitCommand []string `json:"exitCommand,omitempty"`
// LocalVolumes are the built-in volumes picked up from the container(s)
// passed to the --volumes-from flag
LocalVolumes []string
}
// ContainerStatus returns a string representation for users


@ -1108,7 +1108,7 @@ func (c *Container) generateHosts() (string, error) {
return c.writeStringToRundir("hosts", hosts)
}
func (c *Container) addImageVolumes(ctx context.Context, g *generate.Generator) error {
func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator) error {
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
return errors.Wrapf(ErrInternal, "container is not mounted")
@ -1121,6 +1121,17 @@ func (c *Container) addImageVolumes(ctx context.Context, g *generate.Generator)
if err != nil {
return err
}
// Add the built-in volumes of the container passed in to --volumes-from
for _, vol := range c.config.LocalVolumes {
if imageData.ContainerConfig.Volumes == nil {
imageData.ContainerConfig.Volumes = map[string]struct{}{
vol: {},
}
} else {
imageData.ContainerConfig.Volumes[vol] = struct{}{}
}
}
for k := range imageData.ContainerConfig.Volumes {
mount := spec.Mount{
Destination: k,


@ -127,7 +127,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Bind builtin image volumes
if c.config.Rootfs == "" && c.config.ImageVolumes {
if err := c.addImageVolumes(ctx, &g); err != nil {
if err := c.addLocalVolumes(ctx, &g); err != nil {
return nil, errors.Wrapf(err, "error mounting image volumes")
}
}


@ -885,6 +885,24 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
}
}
// WithLocalVolumes sets the built-in volumes retrieved from the container(s)
// passed to the --volumes-from flag.
// This stores the built-in volume information in the ContainerConfig so we can
// add them when creating the container.
func WithLocalVolumes(volumes []string) CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return ErrCtrFinalized
}
if volumes != nil {
ctr.config.LocalVolumes = append(ctr.config.LocalVolumes, volumes...)
}
return nil
}
}
// WithEntrypoint sets the entrypoint of the container.
// This is not used to change the container's spec, but will instead be used
// during commit to populate the entrypoint of the new image.


@ -1,6 +1,7 @@
package createconfig
import (
"encoding/json"
"os"
"strconv"
"strings"
@ -123,14 +124,16 @@ type CreateConfig struct {
User string //user
UtsMode container.UTSMode //uts
Volumes []string //volume
VolumesFrom []string
WorkDir string //workdir
MountLabel string //SecurityOpts
ProcessLabel string //SecurityOpts
NoNewPrivs bool //SecurityOpts
ApparmorProfile string //SecurityOpts
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
Rootfs string
LocalVolumes []string //Keeps track of the built-in volumes of the container(s) used in the --volumes-from flag
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@ -215,6 +218,52 @@ func (c *CreateConfig) GetVolumeMounts(specMounts []spec.Mount) ([]spec.Mount, e
return m, nil
}
// GetVolumesFrom reads the create-config artifact of each container passed to --volumes-from
// and adds its volumes to c.Volumes of the current container.
func (c *CreateConfig) GetVolumesFrom() error {
var options string
for _, vol := range c.VolumesFrom {
splitVol := strings.SplitN(vol, ":", 2)
if len(splitVol) == 2 {
options = splitVol[1]
}
ctr, err := c.Runtime.LookupContainer(splitVol[0])
if err != nil {
return errors.Wrapf(err, "error looking up container %q", splitVol[0])
}
var createArtifact CreateConfig
artifact, err := ctr.GetArtifact("create-config")
if err != nil {
return errors.Wrapf(err, "error getting create-config artifact for %q", splitVol[0])
}
if err := json.Unmarshal(artifact, &createArtifact); err != nil {
return err
}
for key := range createArtifact.BuiltinImgVolumes {
c.LocalVolumes = append(c.LocalVolumes, key)
}
for _, i := range createArtifact.Volumes {
// Volumes are in the format host-dir:ctr-dir[:options]; keep the host and ctr dirs
// and apply any options the user passed to the flag.
spliti := strings.SplitN(i, ":", 3)
// Return an error when mounting a volume the source container mounted with the private
// 'Z' label; the user can override this by adding 'z' to the options.
if len(spliti) > 2 && strings.Contains(spliti[2], "Z") && !strings.Contains(options, "z") {
return errors.Errorf("volume mounted with private option 'Z' in %q. Use option 'z' to mount in current container", ctr.ID())
}
if options == "" {
// No override was given, so keep the volume entry exactly as the source container recorded it
c.Volumes = append(c.Volumes, i)
} else {
c.Volumes = append(c.Volumes, spliti[0]+":"+spliti[1]+":"+options)
}
}
}
return nil
}
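
To sketch the behavior implemented above (container names and paths are illustrative): a volume the source container mounted with the private `Z` label is refused unless the `z` option is supplied, and a mode suffix such as `ro` overrides the mode recorded in the source's create-config:

```
$ podman create --name src -v /srv/data:/data:Z docker.io/library/alpine:latest sh
$ podman run --rm --volumes-from src docker.io/library/alpine:latest ls /data       # rejected: use option 'z'
$ podman run --rm --volumes-from src:ro,z docker.io/library/alpine:latest ls /data  # read-only, relabeled for sharing
```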
//GetTmpfsMounts takes user provided input for Tmpfs mounts and creates Mount structs
func (c *CreateConfig) GetTmpfsMounts() []spec.Mount {
var m []spec.Mount
@ -289,6 +338,10 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime) ([]lib
options = append(options, libpod.WithUserVolumes(volumes))
}
if len(c.LocalVolumes) != 0 {
options = append(options, libpod.WithLocalVolumes(c.LocalVolumes))
}
if len(c.Command) != 0 {
options = append(options, libpod.WithCommand(c.Command))
}


@ -248,6 +248,9 @@ func CreateConfigToOCISpec(config *CreateConfig) (*spec.Spec, error) { //nolint
}
// BIND MOUNTS
if err := config.GetVolumesFrom(); err != nil {
return nil, errors.Wrap(err, "error getting volume mounts from --volumes-from flag")
}
mounts, err := config.GetVolumeMounts(configSpec.Mounts)
if err != nil {
return nil, errors.Wrapf(err, "error getting volume mounts")


@ -510,4 +510,41 @@ USER mail`
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
It("podman run --volumes-from flag", func() {
vol := filepath.Join(podmanTest.TempDir, "vol-test")
err := os.MkdirAll(vol, 0755)
Expect(err).To(BeNil())
volFile := filepath.Join(vol, "test.txt")
data := "Testing --volumes-from!!!"
err = ioutil.WriteFile(volFile, []byte(data), 0755)
Expect(err).To(BeNil())
session := podmanTest.Podman([]string{"create", "--volume", vol + ":/myvol", "docker.io/library/redis:alpine", "sh"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ctrID := session.OutputToString()
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "echo", "'testing read-write!' >> myvol/test.txt"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID + ":z", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
It("podman run --volumes-from flag with built-in volumes", func() {
session := podmanTest.Podman([]string{"create", "docker.io/library/redis:alpine", "sh"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ctrID := session.OutputToString()
session = podmanTest.Podman([]string{"run", "--volumes-from", ctrID, ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("data"))
})
})