Fix permissions on initially created named volumes

The permissions of a newly created named volume should match those of the directory it is being mounted on.

Fixes: https://github.com/containers/podman/issues/10188

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Author: Daniel J Walsh <dwalsh@redhat.com>
Date:   2021-06-02 05:28:26 -04:00
Parent: e3dd12a2be
Commit: 81eb71fe36

4 changed files with 92 additions and 59 deletions
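
The heart of the change is small: after the usual chown, the freshly created volume's mount point is made to match the mode and timestamps of the directory it shadows. Below is a minimal standalone sketch of that idea, not podman code; the paths and the mirrorPermissions name are illustrative.

package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

// mirrorPermissions copies the mode bits and the atime/mtime of target onto
// mountPoint, always adding the directory search bits (0111) as the commit does.
func mirrorPermissions(mountPoint, target string) error {
	st, err := os.Lstat(target)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // nothing to mirror; keep the volume's defaults
		}
		return err
	}
	if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
		return err
	}
	// Pull atime out of the platform stat data (Linux-specific field names).
	stat := st.Sys().(*syscall.Stat_t)
	atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
	return os.Chtimes(mountPoint, atime, st.ModTime())
}

func main() {
	if err := mirrorPermissions("/tmp/vol", "/tmp/target"); err != nil {
		fmt.Println("mirror failed:", err)
	}
}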

@@ -1062,7 +1062,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 	}
 	for _, v := range c.config.NamedVolumes {
-		if err := c.chownVolume(v.Name); err != nil {
+		if err := c.fixVolumePermissions(v); err != nil {
 			return err
 		}
 	}
@@ -1681,64 +1681,6 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
-
-// Chown the specified volume if necessary.
-func (c *Container) chownVolume(volumeName string) error {
-	vol, err := c.runtime.state.Volume(volumeName)
-	if err != nil {
-		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
-	}
-	vol.lock.Lock()
-	defer vol.lock.Unlock()
-	// The volume may need a copy-up. Check the state.
-	if err := vol.update(); err != nil {
-		return err
-	}
-	// TODO: For now, I've disabled chowning volumes owned by non-Podman
-	// drivers. This may be safe, but it's really going to be a case-by-case
-	// thing, I think - safest to leave disabled now and re-enable later if
-	// there is a demand.
-	if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
-		vol.state.NeedsChown = false
-		uid := int(c.config.Spec.Process.User.UID)
-		gid := int(c.config.Spec.Process.User.GID)
-		if c.config.IDMappings.UIDMap != nil {
-			p := idtools.IDPair{
-				UID: uid,
-				GID: gid,
-			}
-			mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
-			newPair, err := mappings.ToHost(p)
-			if err != nil {
-				return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
-			}
-			uid = newPair.UID
-			gid = newPair.GID
-		}
-		vol.state.UIDChowned = uid
-		vol.state.GIDChowned = gid
-		if err := vol.save(); err != nil {
-			return err
-		}
-		mountPoint, err := vol.MountPoint()
-		if err != nil {
-			return err
-		}
-		if err := os.Lchown(mountPoint, uid, gid); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {
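
Note the ordering in the block removed above (and preserved in its replacement below): vol.state.NeedsChown is cleared before the chown is attempted, so the fixup runs at most once per volume. A toy illustration of that one-shot pattern, using illustrative types rather than podman's:

package main

import (
	"fmt"
	"os"
)

// volumeState is a stand-in for the persisted volume state in the diff.
type volumeState struct {
	NeedsChown bool
	MountPoint string
}

// chownOnce mirrors the gating above: clear the flag first, then do the work,
// so the ownership fixup happens only on the volume's first use.
func chownOnce(state *volumeState, uid, gid int) error {
	if !state.NeedsChown {
		return nil // already handled on a previous start
	}
	state.NeedsChown = false
	return os.Lchown(state.MountPoint, uid, gid)
}

func main() {
	st := &volumeState{NeedsChown: true, MountPoint: "/tmp/vol"}
	fmt.Println(chownOnce(st, 1000, 1000)) // attempts the chown
	fmt.Println(chownOnce(st, 1000, 1000)) // no-op: <nil>
}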

@@ -2426,3 +2426,77 @@ func (c *Container) createSecretMountDir() error {
 	return err
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+	vol, err := c.runtime.state.Volume(v.Name)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+	}
+	vol.lock.Lock()
+	defer vol.lock.Unlock()
+	// The volume may need a copy-up. Check the state.
+	if err := vol.update(); err != nil {
+		return err
+	}
+	// TODO: For now, I've disabled chowning volumes owned by non-Podman
+	// drivers. This may be safe, but it's really going to be a case-by-case
+	// thing, I think - safest to leave disabled now and re-enable later if
+	// there is a demand.
+	if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
+		vol.state.NeedsChown = false
+		uid := int(c.config.Spec.Process.User.UID)
+		gid := int(c.config.Spec.Process.User.GID)
+		if c.config.IDMappings.UIDMap != nil {
+			p := idtools.IDPair{
+				UID: uid,
+				GID: gid,
+			}
+			mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
+			newPair, err := mappings.ToHost(p)
+			if err != nil {
+				return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
+			}
+			uid = newPair.UID
+			gid = newPair.GID
+		}
+		vol.state.UIDChowned = uid
+		vol.state.GIDChowned = gid
+		if err := vol.save(); err != nil {
+			return err
+		}
+		mountPoint, err := vol.MountPoint()
+		if err != nil {
+			return err
+		}
+		if err := os.Lchown(mountPoint, uid, gid); err != nil {
+			return err
+		}
+		// Make sure the new volume matches the permissions of the target directory.
+		// https://github.com/containers/podman/issues/10188
+		st, err := os.Lstat(filepath.Join(c.state.Mountpoint, v.Dest))
+		if err == nil {
+			if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
+				return err
+			}
+			stat := st.Sys().(*syscall.Stat_t)
+			atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+			if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
+				return err
+			}
+		} else if !os.IsNotExist(err) {
+			return err
+		}
+	}
+	return nil
+}
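
When the container runs in a user namespace, the UID:GID pair from the container's config has to be translated into host IDs before the chown, which is what the idtools calls above do. A self-contained sketch of that round trip through the containers/storage idtools package; the 0 -> 100000 mapping is made up for illustration:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// A typical rootless-style mapping: container root maps to host UID 100000.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	mappings := idtools.NewIDMappingsFromMaps(uidMap, gidMap)
	newPair, err := mappings.ToHost(idtools.IDPair{UID: 0, GID: 0})
	if err != nil {
		panic(err)
	}
	// Prints "0:0 maps to 100000:100000 on the host".
	fmt.Printf("0:0 maps to %d:%d on the host\n", newPair.UID, newPair.GID)
}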

@@ -57,3 +57,8 @@ func (c *Container) reloadNetwork() error {
 func (c *Container) getUserOverrides() *lookup.Overrides {
 	return nil
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+	return define.ErrNotImplemented
+}
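
This stub follows the usual libpod platform-split pattern: the real implementation lives in the Linux-only file, and every other platform gets a build-constrained file whose method just returns define.ErrNotImplemented. A generic sketch of that pattern, with the package and names purely illustrative:

//go:build !linux

// Illustrative stub file: on non-Linux platforms the same function name
// exists but immediately reports that the feature is unavailable.
package volumes

import "errors"

var errNotImplemented = errors.New("not implemented on this platform")

// fixVolumePermissions is the cross-platform entry point; a sibling file
// constrained to linux would carry the real implementation.
func fixVolumePermissions(volume string) error {
	return errNotImplemented
}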

@@ -904,6 +904,18 @@ USER bin`, BB)
 		Expect(session.ExitCode()).To(Equal(100))
 	})
 
+	It("podman run with named volume", func() {
+		session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+		perms := session.OutputToString()
+
+		session = podmanTest.Podman([]string{"run", "--rm", "-v", "test:/var/tmp", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+		Expect(session.OutputToString()).To(Equal(perms))
+	})
+
 	It("podman run with built-in volume image", func() {
 		session := podmanTest.Podman([]string{"run", "--rm", redis, "ls"})
 		session.WaitWithDefaultTimeout()