Merge pull request #6747 from giuseppe/fix-user-volumes

container: move volume chown after spec generation
Merged by OpenShift Merge Robot (committed via GitHub) on 2020-06-30 12:01:40 -04:00
8 changed files with 180 additions and 17 deletions
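The change moves named-volume chowning in (*Container).init to after OCI spec generation, so the UID and GID are taken from the finalized Spec.Process.User rather than pre-spec defaults. For orientation, here is a minimal standalone sketch of the recursive-chown pattern the new chownVolume helper uses; the package layout, path, and IDs below are hypothetical and not part of this PR.

```go
// Minimal sketch (not libpod code): recursively chown a directory tree,
// mirroring the filepath.Walk loop in the new chownVolume helper below.
// The path and IDs in main are hypothetical placeholders.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func chownTree(root string, uid, gid int) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Chown every file and directory under root to uid:gid.
		return os.Chown(path, uid, gid)
	})
}

func main() {
	if err := chownTree("/tmp/example-volume", 1000, 1000); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

The e2e test added at the end of this diff exercises the user-visible effect: after running a container with --user 1000:1000 and a named volume, podman volume inspect reports UID 1000 and GID 1000 for that volume.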

@@ -1015,6 +1015,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
return err
}
for _, v := range c.config.NamedVolumes {
if err := c.chownVolume(v.Name); err != nil {
return err
}
}
// With the spec complete, do an OCI create
if err := c.ociRuntime.CreateContainer(c, nil); err != nil {
// Fedora 31 is carrying a patch to display improved error
@@ -1508,6 +1514,48 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
return vol, nil
}
// Chown the specified volume if necessary.
func (c *Container) chownVolume(volumeName string) error {
vol, err := c.runtime.state.Volume(volumeName)
if err != nil {
return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
}
uid := int(c.config.Spec.Process.User.UID)
gid := int(c.config.Spec.Process.User.GID)
vol.lock.Lock()
defer vol.lock.Unlock()
// The volume may need a copy-up. Check the state.
if err := vol.update(); err != nil {
return err
}
if vol.state.NeedsChown {
vol.state.NeedsChown = false
vol.state.UIDChowned = uid
vol.state.GIDChowned = gid
if err := vol.save(); err != nil {
return err
}
err := filepath.Walk(vol.MountPoint(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if err := os.Chown(path, uid, gid); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// cleanupStorage unmounts and cleans up the container's root filesystem
func (c *Container) cleanupStorage() error {
if !c.state.Mounted {
@@ -1854,8 +1902,8 @@ func (c *Container) unmount(force bool) error {
// this should be from chrootarchive.
// Container MUST be mounted before calling.
func (c *Container) copyWithTarFromImage(source, dest string) error {
a := archive.NewDefaultArchiver()
mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
a := archive.NewArchiver(mappings)
if err := c.copyOwnerAndPerms(source, dest); err != nil {
return err
}

@@ -1497,6 +1497,19 @@ func WithVolumeGID(gid int) VolumeCreateOption {
}
}
// WithVolumeNeedsChown sets the NeedsChown flag for the volume.
func WithVolumeNeedsChown() VolumeCreateOption {
return func(volume *Volume) error {
if volume.valid {
return define.ErrVolumeFinalized
}
volume.state.NeedsChown = true
return nil
}
}
// withSetAnon sets a bool notifying libpod that this volume is anonymous and
// should be removed when containers using it are removed and volumes are
// specified for removal.

@@ -309,7 +309,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
logrus.Debugf("Creating new volume %s for container", vol.Name)
// The volume does not exist, so we need to create it.
volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())}
volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()), WithVolumeNeedsChown()}
if isAnonymous {
volOptions = append(volOptions, withSetAnon())
}

@@ -64,6 +64,14 @@ type VolumeState struct {
// create time, then cleared after the copy up is done and never set
// again.
NeedsCopyUp bool `json:"notYetMounted,omitempty"`
// NeedsChown indicates that the next time the volume is mounted into
// a container, the container will chown the volume to the container process
// UID/GID.
NeedsChown bool `json:"notYetChowned,omitempty"`
// UIDChowned is the UID the volume was chowned to.
UIDChowned int `json:"uidChowned,omitempty"`
// GIDChowned is the GID the volume was chowned to.
GIDChowned int `json:"gidChowned,omitempty"`
}
// Name retrieves the volume's name
@@ -113,13 +121,33 @@ func (v *Volume) Anonymous() bool {
}
// UID returns the UID the volume will be created as.
func (v *Volume) UID() int {
return v.config.UID
func (v *Volume) UID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
if !v.valid {
return -1, define.ErrVolumeRemoved
}
if v.state.UIDChowned > 0 {
return v.state.UIDChowned, nil
}
return v.config.UID, nil
}
// GID returns the GID the volume will be created as.
func (v *Volume) GID() int {
return v.config.GID
func (v *Volume) GID() (int, error) {
v.lock.Lock()
defer v.lock.Unlock()
if !v.valid {
return -1, define.ErrVolumeRemoved
}
if v.state.GIDChowned > 0 {
return v.state.GIDChowned, nil
}
return v.config.GID, nil
}
// CreatedTime returns the time the volume was created at. It was not tracked

@@ -65,8 +65,15 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
for k, v := range v.config.Options {
data.Options[k] = v
}
data.UID = v.config.UID
data.GID = v.config.GID
var err error
data.UID, err = v.UID()
if err != nil {
return nil, err
}
data.GID, err = v.GID()
if err != nil {
return nil, err
}
data.Anonymous = v.config.IsAnon
return data, nil

@@ -86,6 +86,17 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
utils.VolumeNotFound(w, name, err)
return
}
var uid, gid int
uid, err = vol.UID()
if err != nil {
utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
return
}
gid, err = vol.GID()
if err != nil {
utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
return
}
volResponse := entities.VolumeConfigResponse{
Name: vol.Name(),
Driver: vol.Driver(),
@@ -94,8 +105,8 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
Labels: vol.Labels(),
Scope: vol.Scope(),
Options: vol.Options(),
UID: vol.UID(),
GID: vol.GID(),
UID: uid,
GID: gid,
}
utils.WriteResponse(w, http.StatusOK, volResponse)
}
@@ -130,6 +141,17 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
}
volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols))
for _, v := range vols {
var uid, gid int
uid, err = v.UID()
if err != nil {
utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
return
}
gid, err = v.GID()
if err != nil {
utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
return
}
config := entities.VolumeConfigResponse{
Name: v.Name(),
Driver: v.Driver(),
@@ -138,8 +160,8 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
Labels: v.Labels(),
Scope: v.Scope(),
Options: v.Options(),
UID: v.UID(),
GID: v.GID(),
UID: uid,
GID: gid,
}
volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config})
}

@@ -95,6 +95,15 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
}
reports := make([]*entities.VolumeInspectReport, 0, len(vols))
for _, v := range vols {
var uid, gid int
uid, err = v.UID()
if err != nil {
return nil, err
}
gid, err = v.GID()
if err != nil {
return nil, err
}
config := entities.VolumeConfigResponse{
Name: v.Name(),
Driver: v.Driver(),
@@ -103,8 +112,8 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
Labels: v.Labels(),
Scope: v.Scope(),
Options: v.Options(),
UID: v.UID(),
GID: v.GID(),
UID: uid,
GID: gid,
}
reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
}
@@ -141,6 +150,15 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
}
reports := make([]*entities.VolumeListReport, 0, len(vols))
for _, v := range vols {
var uid, gid int
uid, err = v.UID()
if err != nil {
return nil, err
}
gid, err = v.GID()
if err != nil {
return nil, err
}
config := entities.VolumeConfigResponse{
Name: v.Name(),
Driver: v.Driver(),
@@ -149,8 +167,8 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
Labels: v.Labels(),
Scope: v.Scope(),
Options: v.Options(),
UID: v.UID(),
GID: v.GID(),
UID: uid,
GID: gid,
}
reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
}

@@ -245,4 +245,31 @@ var _ = Describe("Podman UserNS support", func() {
ok, _ := session.GrepString("4998")
Expect(ok).To(BeTrue())
})
It("podman --user with volume", func() {
tests := []struct {
uid, gid, arg, vol string
}{
{"0", "0", "0:0", "vol-0"},
{"1000", "0", "1000", "vol-1"},
{"1000", "1000", "1000:1000", "vol-2"},
}
for _, tt := range tests {
session := podmanTest.Podman([]string{"run", "-d", "--user", tt.arg, "--mount", "type=volume,src=" + tt.vol + ",dst=/home/user", "alpine", "top"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
inspectUID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .UID }}", tt.vol})
inspectUID.WaitWithDefaultTimeout()
Expect(inspectUID.ExitCode()).To(Equal(0))
Expect(inspectUID.OutputToString()).To(Equal(tt.uid))
// Make sure we're defaulting to 0.
inspectGID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .GID }}", tt.vol})
inspectGID.WaitWithDefaultTimeout()
Expect(inspectGID.ExitCode()).To(Equal(0))
Expect(inspectGID.OutputToString()).To(Equal(tt.gid))
}
})
})