Merge pull request #3876 from mheon/fix_mount_flags

Allow suid, exec, dev mount options to cancel nosuid/noexec/nodev
This commit is contained in:
OpenShift Merge Robot
2019-09-04 22:43:41 +02:00
committed by GitHub
37 changed files with 857 additions and 623 deletions

2
go.mod
View File

@ -13,7 +13,7 @@ require (
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
github.com/containernetworking/cni v0.7.1 github.com/containernetworking/cni v0.7.1
github.com/containernetworking/plugins v0.8.1 github.com/containernetworking/plugins v0.8.1
github.com/containers/buildah v1.10.1 github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142
github.com/containers/conmon v0.3.0 // indirect github.com/containers/conmon v0.3.0 // indirect
github.com/containers/image v3.0.2+incompatible github.com/containers/image v3.0.2+incompatible
github.com/containers/psgo v1.3.1 github.com/containers/psgo v1.3.1

2
go.sum
View File

@ -67,6 +67,8 @@ github.com/containernetworking/plugins v0.7.4 h1:ugkuXfg1Pdzm54U5DGMzreYIkZPSCmS
github.com/containernetworking/plugins v0.7.4/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= github.com/containernetworking/plugins v0.7.4/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU=
github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ=
github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU=
github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142 h1:RxJ7MbdmorTHiKcJDOz6SwRPasZVp4LOdRWoZ1fdlsQ=
github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142/go.mod h1:QIIw13J1YIwWQskItX1wqZPQtUOOKrOnHE+LTibbLLA=
github.com/containers/buildah v1.9.0 h1:ktVRCGNoVfW8PlTuCKUeh+zGdqn1Nik80DSWvGX+v4Y= github.com/containers/buildah v1.9.0 h1:ktVRCGNoVfW8PlTuCKUeh+zGdqn1Nik80DSWvGX+v4Y=
github.com/containers/buildah v1.9.0/go.mod h1:1CsiLJvyU+h+wOjnqJJOWuJCVcMxZOr5HN/gHGdzJxY= github.com/containers/buildah v1.9.0/go.mod h1:1CsiLJvyU+h+wOjnqJJOWuJCVcMxZOr5HN/gHGdzJxY=
github.com/containers/buildah v1.9.2 h1:dg87r1W1poWVQE0lTmP3BzaqgEI5IRudZ3jKjNIZ3xQ= github.com/containers/buildah v1.9.2 h1:dg87r1W1poWVQE0lTmP3BzaqgEI5IRudZ3jKjNIZ3xQ=

View File

@ -1360,10 +1360,15 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
} }
destinations[vol.Dest] = true destinations[vol.Dest] = true
mountOpts, err := util.ProcessOptions(vol.Options, false, nil)
if err != nil {
return errors.Wrapf(err, "error processing options for named volume %q mounted at %q", vol.Name, vol.Dest)
}
ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{ ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
Name: vol.Name, Name: vol.Name,
Dest: vol.Dest, Dest: vol.Dest,
Options: util.ProcessOptions(vol.Options), Options: mountOpts,
}) })
} }

View File

@ -2,13 +2,11 @@ package createconfig
import ( import (
"os" "os"
"path/filepath"
"strings" "strings"
"github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/rootless"
pmount "github.com/containers/storage/pkg/mount"
"github.com/docker/docker/oci/caps" "github.com/docker/docker/oci/caps"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/user" "github.com/opencontainers/runc/libcontainer/user"
@ -368,7 +366,11 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
// BIND MOUNTS // BIND MOUNTS
configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts) configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts)
// Process mounts to ensure correct options // Process mounts to ensure correct options
configSpec.Mounts = initFSMounts(configSpec.Mounts) finalMounts, err := initFSMounts(configSpec.Mounts)
if err != nil {
return nil, err
}
configSpec.Mounts = finalMounts
// BLOCK IO // BLOCK IO
blkio, err := config.CreateBlockIO() blkio, err := config.CreateBlockIO()
@ -394,43 +396,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
} }
} }
// Make sure that the bind mounts keep options like nosuid, noexec, nodev.
mounts, err := pmount.GetMounts()
if err != nil {
return nil, err
}
for i := range configSpec.Mounts {
m := &configSpec.Mounts[i]
isBind := false
for _, o := range m.Options {
if o == "bind" || o == "rbind" {
isBind = true
break
}
}
if !isBind {
continue
}
mount, err := findMount(m.Source, mounts)
if err != nil {
return nil, err
}
if mount == nil {
continue
}
next_option:
for _, o := range strings.Split(mount.Opts, ",") {
if o == "nosuid" || o == "noexec" || o == "nodev" {
for _, e := range m.Options {
if e == o {
continue next_option
}
}
m.Options = append(m.Options, o)
}
}
}
// Add annotations // Add annotations
if configSpec.Annotations == nil { if configSpec.Annotations == nil {
configSpec.Annotations = make(map[string]string) configSpec.Annotations = make(map[string]string)
@ -490,25 +455,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
return configSpec, nil return configSpec, nil
} }
func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) {
var err error
target, err = filepath.Abs(target)
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve %s", target)
}
var bestSoFar *pmount.Info
for _, i := range mounts {
if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) {
// Won't be better than what we have already found
continue
}
if strings.HasPrefix(target, i.Mountpoint) {
bestSoFar = i
}
}
return bestSoFar, nil
}
func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) { func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
if !config.Privileged { if !config.Privileged {
for _, mp := range []string{ for _, mp := range []string{

View File

@ -10,6 +10,7 @@ import (
"github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/parse"
"github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/util" "github.com/containers/libpod/pkg/util"
pmount "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go" spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -160,22 +161,18 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
} }
// If requested, add tmpfs filesystems for read-only containers. // If requested, add tmpfs filesystems for read-only containers.
// Need to keep track of which we created, so we don't modify options
// for them later...
readonlyTmpfs := map[string]bool{
"/tmp": false,
"/var/tmp": false,
"/run": false,
}
if config.ReadOnlyRootfs && config.ReadOnlyTmpfs { if config.ReadOnlyRootfs && config.ReadOnlyTmpfs {
readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"} options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
for dest := range readonlyTmpfs { for _, dest := range readonlyTmpfs {
if _, ok := baseMounts[dest]; ok { if _, ok := baseMounts[dest]; ok {
continue continue
} }
localOpts := options localOpts := options
if dest == "/run" { if dest == "/run" {
localOpts = append(localOpts, "noexec", "size=65536k") localOpts = append(localOpts, "noexec", "size=65536k")
} else {
localOpts = append(localOpts, "exec")
} }
baseMounts[dest] = spec.Mount{ baseMounts[dest] = spec.Mount{
Destination: dest, Destination: dest,
@ -183,7 +180,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
Source: "tmpfs", Source: "tmpfs",
Options: localOpts, Options: localOpts,
} }
readonlyTmpfs[dest] = true
} }
} }
@ -202,16 +198,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
// Final step: maps to arrays // Final step: maps to arrays
finalMounts := make([]spec.Mount, 0, len(baseMounts)) finalMounts := make([]spec.Mount, 0, len(baseMounts))
for _, mount := range baseMounts { for _, mount := range baseMounts {
// All user-added tmpfs mounts need their options processed.
// Exception: mounts added by the ReadOnlyTmpfs option, which
// contain several exceptions to normal options rules.
if mount.Type == TypeTmpfs && !readonlyTmpfs[mount.Destination] {
opts, err := util.ProcessTmpfsOptions(mount.Options)
if err != nil {
return nil, nil, err
}
mount.Options = opts
}
if mount.Type == TypeBind { if mount.Type == TypeBind {
absSrc, err := filepath.Abs(mount.Source) absSrc, err := filepath.Abs(mount.Source)
if err != nil { if err != nil {
@ -226,9 +212,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
finalVolumes = append(finalVolumes, volume) finalVolumes = append(finalVolumes, volume)
} }
logrus.Debugf("Got mounts: %v", finalMounts)
logrus.Debugf("Got volumes: %v", finalVolumes)
return finalMounts, finalVolumes, nil return finalMounts, finalVolumes, nil
} }
@ -250,14 +233,17 @@ func (config *CreateConfig) getVolumesFrom(runtime *libpod.Runtime) (map[string]
splitVol = strings.SplitN(vol, ":", 2) splitVol = strings.SplitN(vol, ":", 2)
) )
if len(splitVol) == 2 { if len(splitVol) == 2 {
if strings.Contains(splitVol[1], "Z") || splitOpts := strings.Split(splitVol[1], ",")
strings.Contains(splitVol[1], "private") || for _, checkOpt := range splitOpts {
strings.Contains(splitVol[1], "slave") || switch checkOpt {
strings.Contains(splitVol[1], "shared") { case "z", "ro", "rw":
return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z", splitVol[1]) // Do nothing, these are valid options
default:
return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z'", splitVol[1])
}
} }
if options, err = parse.ValidateVolumeOpts(strings.Split(splitVol[1], ",")); err != nil { if options, err = parse.ValidateVolumeOpts(splitOpts); err != nil {
return nil, nil, err return nil, nil, err
} }
} }
@ -403,9 +389,7 @@ func getBindMount(args []string) (spec.Mount, error) {
Type: TypeBind, Type: TypeBind,
} }
setSource := false var setSource, setDest, setRORW, setSuid, setDev, setExec bool
setDest := false
setRORW := false
for _, val := range args { for _, val := range args {
kv := strings.Split(val, "=") kv := strings.Split(val, "=")
@ -440,9 +424,23 @@ func getBindMount(args []string) (spec.Mount, error) {
} else { } else {
return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val) return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val)
} }
case "nosuid", "nodev", "noexec": case "nosuid", "suid":
// TODO: detect duplication of these options. if setSuid {
// (Is this necessary?) return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
}
setSuid = true
newMount.Options = append(newMount.Options, kv[0])
case "nodev", "dev":
if setDev {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
}
setDev = true
newMount.Options = append(newMount.Options, kv[0])
case "noexec", "exec":
if setExec {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
}
setExec = true
newMount.Options = append(newMount.Options, kv[0]) newMount.Options = append(newMount.Options, kv[0])
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z":
newMount.Options = append(newMount.Options, kv[0]) newMount.Options = append(newMount.Options, kv[0])
@ -497,12 +495,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
Source: TypeTmpfs, Source: TypeTmpfs,
} }
setDest := false var setDest, setRORW, setSuid, setDev, setExec bool
for _, val := range args { for _, val := range args {
kv := strings.Split(val, "=") kv := strings.Split(val, "=")
switch kv[0] { switch kv[0] {
case "ro", "nosuid", "nodev", "noexec": case "ro", "rw":
if setRORW {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
}
setRORW = true
newMount.Options = append(newMount.Options, kv[0])
case "nosuid", "suid":
if setSuid {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
}
setSuid = true
newMount.Options = append(newMount.Options, kv[0])
case "nodev", "dev":
if setDev {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
}
setDev = true
newMount.Options = append(newMount.Options, kv[0])
case "noexec", "exec":
if setExec {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
}
setExec = true
newMount.Options = append(newMount.Options, kv[0]) newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode": case "tmpfs-mode":
if len(kv) == 1 { if len(kv) == 1 {
@ -543,14 +563,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) { func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) {
newVolume := new(libpod.ContainerNamedVolume) newVolume := new(libpod.ContainerNamedVolume)
setSource := false var setSource, setDest, setRORW, setSuid, setDev, setExec bool
setDest := false
for _, val := range args { for _, val := range args {
kv := strings.Split(val, "=") kv := strings.Split(val, "=")
switch kv[0] { switch kv[0] {
case "ro", "nosuid", "nodev", "noexec": case "ro", "rw":
// TODO: detect duplication of these options if setRORW {
return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
}
setRORW = true
newVolume.Options = append(newVolume.Options, kv[0])
case "nosuid", "suid":
if setSuid {
return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
}
setSuid = true
newVolume.Options = append(newVolume.Options, kv[0])
case "nodev", "dev":
if setDev {
return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
}
setDev = true
newVolume.Options = append(newVolume.Options, kv[0])
case "noexec", "exec":
if setExec {
return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
}
setExec = true
newVolume.Options = append(newVolume.Options, kv[0]) newVolume.Options = append(newVolume.Options, kv[0])
case "volume-label": case "volume-label":
return nil, errors.Errorf("the --volume-label option is not presently implemented") return nil, errors.Errorf("the --volume-label option is not presently implemented")
@ -692,6 +732,9 @@ func (config *CreateConfig) getTmpfsMounts() (map[string]spec.Mount, error) {
var options []string var options []string
spliti := strings.Split(i, ":") spliti := strings.Split(i, ":")
destPath := spliti[0] destPath := spliti[0]
if err := parse.ValidateVolumeCtrDir(spliti[0]); err != nil {
return nil, err
}
if len(spliti) > 1 { if len(spliti) > 1 {
options = strings.Split(spliti[1], ",") options = strings.Split(spliti[1], ",")
} }
@ -775,16 +818,75 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
} }
// Ensure mount options on all mounts are correct // Ensure mount options on all mounts are correct
func initFSMounts(inputMounts []spec.Mount) []spec.Mount { func initFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
// We need to look up mounts so we can figure out the proper mount flags
// to apply.
systemMounts, err := pmount.GetMounts()
if err != nil {
return nil, errors.Wrapf(err, "error retrieving system mounts to look up mount options")
}
// TODO: We probably don't need to re-build the mounts array
var mounts []spec.Mount var mounts []spec.Mount
for _, m := range inputMounts { for _, m := range inputMounts {
if m.Type == TypeBind { if m.Type == TypeBind {
m.Options = util.ProcessOptions(m.Options) baseMnt, err := findMount(m.Destination, systemMounts)
if err != nil {
return nil, errors.Wrapf(err, "error looking up mountpoint for mount %s", m.Destination)
}
var noexec, nosuid, nodev bool
for _, baseOpt := range strings.Split(baseMnt.Opts, ",") {
switch baseOpt {
case "noexec":
noexec = true
case "nosuid":
nosuid = true
case "nodev":
nodev = true
}
}
defaultMountOpts := new(util.DefaultMountOptions)
defaultMountOpts.Noexec = noexec
defaultMountOpts.Nosuid = nosuid
defaultMountOpts.Nodev = nodev
opts, err := util.ProcessOptions(m.Options, false, defaultMountOpts)
if err != nil {
return nil, err
}
m.Options = opts
} }
if m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev" { if m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev" {
m.Options = append(m.Options, "tmpcopyup") opts, err := util.ProcessOptions(m.Options, true, nil)
if err != nil {
return nil, err
} }
m.Options = opts
}
mounts = append(mounts, m) mounts = append(mounts, m)
} }
return mounts return mounts, nil
}
// TODO: We could make this a bit faster by building a tree of the mountpoints
// and traversing it to identify the correct mount.
func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) {
var err error
target, err = filepath.Abs(target)
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve %s", target)
}
var bestSoFar *pmount.Info
for _, i := range mounts {
if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) {
// Won't be better than what we have already found
continue
}
if strings.HasPrefix(target, i.Mountpoint) {
bestSoFar = i
}
}
return bestSoFar, nil
} }

View File

@ -10,91 +10,120 @@ var (
// ErrBadMntOption indicates that an invalid mount option was passed. // ErrBadMntOption indicates that an invalid mount option was passed.
ErrBadMntOption = errors.Errorf("invalid mount option") ErrBadMntOption = errors.Errorf("invalid mount option")
// ErrDupeMntOption indicates that a duplicate mount option was passed. // ErrDupeMntOption indicates that a duplicate mount option was passed.
ErrDupeMntOption = errors.Errorf("duplicate option passed") ErrDupeMntOption = errors.Errorf("duplicate mount option passed")
) )
// ProcessOptions parses the options for a bind mount and ensures that they are // DefaultMountOptions sets default mount options for ProcessOptions.
// sensible and follow convention. type DefaultMountOptions struct {
func ProcessOptions(options []string) []string { Noexec bool
Nosuid bool
Nodev bool
}
// ProcessOptions parses the options for a bind or tmpfs mount and ensures that
// they are sensible and follow convention. The isTmpfs variable controls
// whether extra, tmpfs-specific options will be allowed.
// The defaults variable controls default mount options that will be set. If it
// is not included, they will be set unconditionally.
func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOptions) ([]string, error) {
var ( var (
foundbind, foundrw, foundro bool foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool
rootProp string
) )
for _, opt := range options {
switch opt {
case "bind", "rbind":
foundbind = true
case "ro":
foundro = true
case "rw":
foundrw = true
case "private", "rprivate", "slave", "rslave", "shared", "rshared":
rootProp = opt
}
}
if !foundbind {
options = append(options, "rbind")
}
if !foundrw && !foundro {
options = append(options, "rw")
}
if rootProp == "" {
options = append(options, "rprivate")
}
return options
}
// ProcessTmpfsOptions parses the options for a tmpfs mountpoint and ensures
// that they are sensible and follow convention.
func ProcessTmpfsOptions(options []string) ([]string, error) {
var (
foundWrite, foundSize, foundProp, foundMode bool
)
baseOpts := []string{"noexec", "nosuid", "nodev"}
for _, opt := range options { for _, opt := range options {
// Some options have parameters - size, mode // Some options have parameters - size, mode
splitOpt := strings.SplitN(opt, "=", 2) splitOpt := strings.SplitN(opt, "=", 2)
switch splitOpt[0] { switch splitOpt[0] {
case "exec", "noexec":
if foundExec {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used")
}
foundExec = true
case "suid", "nosuid":
if foundSuid {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used")
}
foundSuid = true
case "nodev", "dev":
if foundDev {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used")
}
foundDev = true
case "rw", "ro": case "rw", "ro":
if foundWrite { if foundWrite {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of rw and ro can be used") return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used")
} }
foundWrite = true foundWrite = true
baseOpts = append(baseOpts, opt)
case "private", "rprivate", "slave", "rslave", "shared", "rshared": case "private", "rprivate", "slave", "rslave", "shared", "rshared":
if foundProp { if foundProp {
return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used") return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used")
} }
foundProp = true foundProp = true
baseOpts = append(baseOpts, opt)
case "size": case "size":
if !isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts")
}
if foundSize { if foundSize {
return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified") return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified")
} }
foundSize = true foundSize = true
baseOpts = append(baseOpts, opt)
case "mode": case "mode":
if !isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts")
}
if foundMode { if foundMode {
return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified") return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified")
} }
foundMode = true foundMode = true
baseOpts = append(baseOpts, opt) case "tmpcopyup":
case "noexec", "nodev", "nosuid": if !isTmpfs {
// Do nothing. We always include these even if they are return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts")
// not explicitly requested. }
if foundCopyUp {
return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' option can only be set once")
}
foundCopyUp = true
case "bind", "rbind":
if isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts")
}
if foundBind {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used")
}
foundBind = true
case "z", "Z":
if isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts")
}
if foundZ {
return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used")
}
default: default:
return nil, errors.Wrapf(ErrBadMntOption, "unknown tmpfs option %q", opt) return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt)
} }
} }
if !foundWrite { if !foundWrite {
baseOpts = append(baseOpts, "rw") options = append(options, "rw")
} }
if !foundProp { if !foundProp {
baseOpts = append(baseOpts, "rprivate") options = append(options, "rprivate")
}
if !foundExec && (defaults == nil || defaults.Noexec) {
options = append(options, "noexec")
}
if !foundSuid && (defaults == nil || defaults.Nosuid) {
options = append(options, "nosuid")
}
if !foundDev && (defaults == nil || defaults.Nodev) {
options = append(options, "nodev")
}
if isTmpfs && !foundCopyUp {
options = append(options, "tmpcopyup")
}
if !isTmpfs && !foundBind {
options = append(options, "rbind")
} }
return baseOpts, nil return options, nil
} }

View File

@ -162,4 +162,32 @@ var _ = Describe("Podman run with volumes", func() {
Expect(session.OutputToString()).To(ContainSubstring("/testvol1")) Expect(session.OutputToString()).To(ContainSubstring("/testvol1"))
Expect(session.OutputToString()).To(ContainSubstring("/testvol2")) Expect(session.OutputToString()).To(ContainSubstring("/testvol2"))
}) })
It("podman run with volumes and suid/dev/exec options", func() {
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
os.Mkdir(mountPath, 0755)
session := podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:/run/test:suid,dev,exec", mountPath), ALPINE, "grep", "/run/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
found, matches := session.GrepString("/run/test")
Expect(found).Should(BeTrue())
Expect(matches[0]).To(Not(ContainSubstring("noexec")))
Expect(matches[0]).To(Not(ContainSubstring("nodev")))
Expect(matches[0]).To(Not(ContainSubstring("nosuid")))
session = podmanTest.Podman([]string{"run", "--rm", "--tmpfs", "/run/test:suid,dev,exec", ALPINE, "grep", "/run/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
found, matches = session.GrepString("/run/test")
Expect(found).Should(BeTrue())
Expect(matches[0]).To(Not(ContainSubstring("noexec")))
Expect(matches[0]).To(Not(ContainSubstring("nodev")))
Expect(matches[0]).To(Not(ContainSubstring("nosuid")))
})
It("podman run with noexec can't exec", func() {
session := podmanTest.Podman([]string{"run", "--rm", "-v", "/bin:/hostbin:noexec", ALPINE, "/hostbin/ls", "/"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Not(Equal(0)))
})
}) })

View File

@ -19,10 +19,10 @@ env:
#### ####
# GCE project where images live # GCE project where images live
IMAGE_PROJECT: "libpod-218412" IMAGE_PROJECT: "libpod-218412"
FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1559164849" FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1565360543"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1559164849" PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1565360543"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514" # Latest UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190724" # Latest
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190530" # LTS PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190722a" # LTS
#### ####
#### Command variables to help avoid duplication #### Command variables to help avoid duplication

View File

@ -2,6 +2,16 @@
# Changelog # Changelog
## v1.10.1 (2019-08-08)
Bump containers/image to v3.0.2 to fix keyring issue
Bug fix for volume minus syntax
Bump container/storage v1.13.1 and containers/image v3.0.1
bump github.com/containernetworking/cni to v0.7.1
Add overlayfs to fuse-overlayfs tip
Add automatic apparmor tag discovery
Fix bug whereby --get-login has no effect
Bump to v1.11.0-dev
## v1.10.0 (2019-08-02) ## v1.10.0 (2019-08-02)
vendor github.com/containers/image@v3.0.0 vendor github.com/containers/image@v3.0.0
Remove GO111MODULE in favor of `-mod=vendor` Remove GO111MODULE in favor of `-mod=vendor`

View File

@ -21,7 +21,7 @@ export GO_BUILD=$(GO) build
endif endif
GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
BUILD_INFO := $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod) CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod)
@ -29,7 +29,7 @@ RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$
LIBSECCOMP_COMMIT := release-2.3 LIBSECCOMP_COMMIT := release-2.3
EXTRALDFLAGS := EXTRALDFLAGS :=
LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(BUILD_INFO) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS) LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS)
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go pkg/unshare/*.c pkg/unshare/*.go util/*.go SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go pkg/unshare/*.c pkg/unshare/*.go util/*.go
all: buildah imgtype docs all: buildah imgtype docs
@ -65,19 +65,9 @@ docs: install.tools ## build the docs on the host
gopath: gopath:
test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
# We use https://github.com/lk4d4/vndr to manage dependencies.
.PHONY: deps
deps: gopath
env GOPATH=$(shell cd ../../../.. ; pwd) vndr
.PHONY: validate .PHONY: validate
validate: install.tools validate: install.tools
# Run gofmt on version 1.11 and higher
ifneq ($(GO110),$(GOVERSION))
@./tests/validate/gofmt.sh
endif
@./tests/validate/whitespace.sh @./tests/validate/whitespace.sh
@./tests/validate/govet.sh
@./tests/validate/git-validation.sh @./tests/validate/git-validation.sh
.PHONY: install.tools .PHONY: install.tools
@ -124,7 +114,7 @@ install.runc:
.PHONY: test-integration .PHONY: test-integration
test-integration: install.tools test-integration: install.tools
./tests/tools/ginkgo $(BUILDFLAGS) -v tests/e2e/. ./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/.
cd tests; ./test_runner.sh cd tests; ./test_runner.sh
tests/testreport/testreport: tests/testreport/testreport.go tests/testreport/testreport: tests/testreport/testreport.go

View File

@ -55,7 +55,8 @@ into other tools.
Podman specializes in all of the commands and functions that help you to maintain and modify Podman specializes in all of the commands and functions that help you to maintain and modify
OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers
created from those images. created from those images. For building container images via Dockerfiles, Podman uses Buildah's
golang API and can be installed independently from Buildah.
A major difference between Podman and Buildah is their concept of a container. Podman A major difference between Podman and Buildah is their concept of a container. Podman
allows users to create "traditional containers" where the intent of these containers is allows users to create "traditional containers" where the intent of these containers is
@ -76,7 +77,7 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
```bash ```bash
$ cat > lighttpd.sh <<"EOF" $ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x #!/usr/bin/env bash -x
ctr1=$(buildah from "${1:-fedora}") ctr1=$(buildah from "${1:-fedora}")

View File

@ -16,7 +16,6 @@ import (
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -36,28 +35,43 @@ type AddAndCopyOptions struct {
Hasher io.Writer Hasher io.Writer
// Excludes is the contents of the .dockerignore file // Excludes is the contents of the .dockerignore file
Excludes []string Excludes []string
// The base directory for Excludes and data to copy in // ContextDir is the base directory for Excludes for content being copied
ContextDir string ContextDir string
// ID mapping options to use when contents to be copied are part of // ID mapping options to use when contents to be copied are part of
// another container, and need ownerships to be mapped from the host to // another container, and need ownerships to be mapped from the host to
// that container's values before copying them into the container. // that container's values before copying them into the container.
IDMappingOptions *IDMappingOptions IDMappingOptions *IDMappingOptions
// DryRun indicates that the content should be digested, but not actually
// copied into the container.
DryRun bool
} }
// addURL copies the contents of the source URL to the destination. This is // addURL copies the contents of the source URL to the destination. This is
// its own function so that deferred closes happen after we're done pulling // its own function so that deferred closes happen after we're done pulling
// down each item of potentially many. // down each item of potentially many.
func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer) error { func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error {
logrus.Debugf("saving %q to %q", srcurl, destination)
resp, err := http.Get(srcurl) resp, err := http.Get(srcurl)
if err != nil { if err != nil {
return errors.Wrapf(err, "error getting %q", srcurl) return errors.Wrapf(err, "error getting %q", srcurl)
} }
defer resp.Body.Close() defer resp.Body.Close()
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
thisWriter := thisHasher
if !dryRun {
logrus.Debugf("saving %q to %q", srcurl, destination)
f, err := os.Create(destination) f, err := os.Create(destination)
if err != nil { if err != nil {
return errors.Wrapf(err, "error creating %q", destination) return errors.Wrapf(err, "error creating %q", destination)
} }
defer f.Close()
if err = f.Chown(owner.UID, owner.GID); err != nil { if err = f.Chown(owner.UID, owner.GID); err != nil {
return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID) return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID)
} }
@ -72,21 +86,21 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer)
}() }()
} }
} }
defer f.Close() defer func() {
bodyReader := io.Reader(resp.Body) if err2 := f.Chmod(0600); err2 != nil {
if hasher != nil { logrus.Debugf("error setting permissions on %q: %v", destination, err2)
bodyReader = io.TeeReader(bodyReader, hasher)
} }
n, err := io.Copy(f, bodyReader) }()
thisWriter = io.MultiWriter(f, thisWriter)
}
n, err := io.Copy(thisWriter, resp.Body)
if err != nil { if err != nil {
return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl) return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl)
} }
if resp.ContentLength >= 0 && n != resp.ContentLength { if resp.ContentLength >= 0 && n != resp.ContentLength {
return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength) return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength)
} }
if err := f.Chmod(0600); err != nil {
return errors.Wrapf(err, "error setting permissions on %q", destination)
}
return nil return nil
} }
@ -119,6 +133,8 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
} }
hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
dest := mountPoint dest := mountPoint
if !options.DryRun {
// Resolve the destination if it was specified as a relative path.
if destination != "" && filepath.IsAbs(destination) { if destination != "" && filepath.IsAbs(destination) {
dir := filepath.Dir(destination) dir := filepath.Dir(destination)
if dir != "." && dir != "/" { if dir != "." && dir != "/" {
@ -145,6 +161,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() { if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest)) return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
} }
}
// Now look at the destination itself. // Now look at the destination itself.
destfi, err := os.Stat(dest) destfi, err := os.Stat(dest)
if err != nil { if err != nil {
@ -156,10 +173,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if len(source) > 1 && (destfi == nil || !destfi.IsDir()) { if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
return errors.Errorf("destination %q is not a directory", dest) return errors.Errorf("destination %q is not a directory", dest)
} }
copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
untarPath := b.untarPath(nil, options.Hasher) untarPath := b.untarPath(nil, options.Hasher, options.DryRun)
err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...) err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
if err != nil { if err != nil {
return err return err
} }
@ -230,9 +247,10 @@ func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternM
return matcher, nil return matcher, nil
} }
func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error { func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
for _, src := range source { for n, src := range source {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
b.ContentDigester.Start("")
// We assume that source is a file, and we're copying // We assume that source is a file, and we're copying
// it to the destination. If the destination is // it to the destination. If the destination is
// already a directory, create a file inside of it. // already a directory, create a file inside of it.
@ -246,7 +264,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
if destfi != nil && destfi.IsDir() { if destfi != nil && destfi.IsDir() {
d = filepath.Join(dest, path.Base(url.Path)) d = filepath.Join(dest, path.Base(url.Path))
} }
if err = addURL(d, src, hostOwner, options.Hasher); err != nil { if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil {
return err return err
} }
continue continue
@ -270,14 +288,17 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
return errors.Wrapf(err, "error reading %q", esrc) return errors.Wrapf(err, "error reading %q", esrc)
} }
if srcfi.IsDir() { if srcfi.IsDir() {
b.ContentDigester.Start("dir")
// The source is a directory, so copy the contents of // The source is a directory, so copy the contents of
// the source directory into the target directory. Try // the source directory into the target directory. Try
// to create it first, so that if there's a problem, // to create it first, so that if there's a problem,
// we'll discover why that won't work. // we'll discover why that won't work.
if !options.DryRun {
if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
return errors.Wrapf(err, "error creating directory %q", dest) return errors.Wrapf(err, "error creating directory %q", dest)
} }
logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") }
logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
if excludes == nil || !excludes.Exclusions() { if excludes == nil || !excludes.Exclusions() {
if err = copyWithTar(esrc, dest); err != nil { if err = copyWithTar(esrc, dest); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, dest) return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
@ -295,26 +316,11 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
if skip { if skip {
return nil return nil
} }
// combine the filename with the dest directory // combine the source's basename with the dest directory
fpath, err := filepath.Rel(esrc, path) fpath, err := filepath.Rel(esrc, path)
if err != nil { if err != nil {
return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc) return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
} }
mtime := info.ModTime()
atime := mtime
times := []syscall.Timespec{
syscall.NsecToTimespec(atime.Unix()),
syscall.NsecToTimespec(mtime.Unix()),
}
if info.IsDir() {
return addHelperDirectory(esrc, path, filepath.Join(dest, fpath), info, hostOwner, times)
}
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
return addHelperSymlink(path, filepath.Join(dest, fpath), hostOwner, times)
}
if !info.Mode().IsRegular() {
return errors.Errorf("error copying %q to %q: source is not a regular file; file mode is %s", path, dest, info.Mode())
}
if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil { if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
return errors.Wrapf(err, "error copying %q to %q", path, dest) return errors.Wrapf(err, "error copying %q to %q", path, dest)
} }
@ -326,6 +332,8 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
continue continue
} }
b.ContentDigester.Start("file")
if !extract || !archive.IsArchivePath(esrc) { if !extract || !archive.IsArchivePath(esrc) {
// This source is a file, and either it's not an // This source is a file, and either it's not an
// archive, or we don't care whether or not it's an // archive, or we don't care whether or not it's an
@ -335,7 +343,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
d = filepath.Join(dest, filepath.Base(gsrc)) d = filepath.Join(dest, filepath.Base(gsrc))
} }
// Copy the file, preserving attributes. // Copy the file, preserving attributes.
logrus.Debugf("copying %q to %q", esrc, d) logrus.Debugf("copying[%d] %q to %q", n, esrc, d)
if err = copyFileWithTar(esrc, d); err != nil { if err = copyFileWithTar(esrc, d); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, d) return errors.Wrapf(err, "error copying %q to %q", esrc, d)
} }
@ -343,7 +351,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
} }
// We're extracting an archive into the destination directory. // We're extracting an archive into the destination directory.
logrus.Debugf("extracting contents of %q into %q", esrc, dest) logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest)
if err = untarPath(esrc, dest); err != nil { if err = untarPath(esrc, dest); err != nil {
return errors.Wrapf(err, "error extracting %q into %q", esrc, dest) return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
} }
@ -351,45 +359,3 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de
} }
return nil return nil
} }
func addHelperDirectory(esrc, path, dest string, info os.FileInfo, hostOwner idtools.IDPair, times []syscall.Timespec) error {
if err := idtools.MkdirAllAndChownNew(dest, info.Mode().Perm(), hostOwner); err != nil {
// discard only EEXIST on the top directory, which would have been created earlier in the caller
if !os.IsExist(err) || path != esrc {
return errors.Errorf("error creating directory %q", dest)
}
}
if err := idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil {
return errors.Wrapf(err, "error setting owner of directory %q to %d:%d", dest, hostOwner.UID, hostOwner.GID)
}
if err := system.LUtimesNano(dest, times); err != nil {
return errors.Wrapf(err, "error setting dates on directory %q", dest)
}
return nil
}
func addHelperSymlink(src, dest string, hostOwner idtools.IDPair, times []syscall.Timespec) error {
linkContents, err := os.Readlink(src)
if err != nil {
return errors.Wrapf(err, "error reading contents of symbolic link at %q", src)
}
if err = os.Symlink(linkContents, dest); err != nil {
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating symbolic link to %q at %q", linkContents, dest)
}
if err = os.RemoveAll(dest); err != nil {
return errors.Wrapf(err, "error clearing symbolic link target %q", dest)
}
if err = os.Symlink(linkContents, dest); err != nil {
return errors.Wrapf(err, "error creating symbolic link to %q at %q (second try)", linkContents, dest)
}
}
if err = idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil {
return errors.Wrapf(err, "error setting owner of symbolic link %q to %d:%d", dest, hostOwner.UID, hostOwner.GID)
}
if err = system.LUtimesNano(dest, times); err != nil {
return errors.Wrapf(err, "error setting dates on symbolic link %q", dest)
}
logrus.Debugf("Symlink(%s, %s)", linkContents, dest)
return nil
}

View File

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
cc -E - > /dev/null 2> /dev/null << EOF cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h> #include <btrfs/ioctl.h>
EOF EOF

View File

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
cc -E - > /dev/null 2> /dev/null << EOF cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h> #include <btrfs/version.h>
EOF EOF

View File

@ -26,7 +26,7 @@ const (
Package = "buildah" Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec // Version for the Package. Bump version in contrib/rpm/buildah.spec
// too. // too.
Version = "1.10.1" Version = "1.11.0-dev"
// The value we use to identify what type of information, currently a // The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state. // serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to // This should only be changed when we make incompatible changes to
@ -196,6 +196,8 @@ type Builder struct {
Format string Format string
// TempVolumes are temporary mount points created during container runs // TempVolumes are temporary mount points created during container runs
TempVolumes map[string]bool TempVolumes map[string]bool
// ContentDigester counts the digest of all Add()ed content
ContentDigester CompositeDigester
} }
// BuilderInfo are used as objects to display container information // BuilderInfo are used as objects to display container information

View File

@ -1181,6 +1181,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
switch m.Type { switch m.Type {
case "bind": case "bind":
// Do the bind mount. // Do the bind mount.
logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil { if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target) return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target)
} }
@ -1366,7 +1367,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
} }
} else { } else {
// If the target's is not a directory or os.DevNull, bind mount os.DevNull over it. // If the target's is not a directory or os.DevNull, bind mount os.DevNull over it.
if isDevNull(targetinfo) { if !isDevNull(targetinfo) {
if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil { if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target) return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target)
} }

64
vendor/github.com/containers/buildah/digester.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package buildah
import (
"hash"
"strings"
digest "github.com/opencontainers/go-digest"
)
type singleDigester struct {
digester digest.Digester
prefix string
}
// CompositeDigester can compute a digest over multiple items.
type CompositeDigester struct {
digesters []singleDigester
}
// Restart clears all state, so that the composite digester can start over.
func (c *CompositeDigester) Restart() {
c.digesters = nil
}
// Start starts recording the digest for a new item. The caller should call
// Hash() immediately after to retrieve the new io.Writer.
func (c *CompositeDigester) Start(prefix string) {
prefix = strings.TrimSuffix(prefix, ":")
c.digesters = append(c.digesters, singleDigester{digester: digest.Canonical.Digester(), prefix: prefix})
}
// Hash returns the hasher for the current item.
func (c *CompositeDigester) Hash() hash.Hash {
num := len(c.digesters)
if num == 0 {
return nil
}
return c.digesters[num-1].digester.Hash()
}
// Digest returns the prefix and a composite digest over everything that's been
// digested.
func (c *CompositeDigester) Digest() (string, digest.Digest) {
num := len(c.digesters)
switch num {
case 0:
return "", ""
case 1:
return c.digesters[0].prefix, c.digesters[0].digester.Digest()
default:
content := ""
for i, digester := range c.digesters {
if i > 0 {
content += ","
}
prefix := digester.prefix
if digester.prefix != "" {
digester.prefix += ":"
}
content += prefix + digester.digester.Digest().Encoded()
}
return "multi", digest.Canonical.FromString(content)
}
}

View File

@ -8,7 +8,7 @@ require (
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
github.com/containernetworking/cni v0.7.1 github.com/containernetworking/cni v0.7.1
github.com/containers/image v3.0.2+incompatible github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.13.1 github.com/containers/storage v1.13.2
github.com/cyphar/filepath-securejoin v0.2.1 github.com/cyphar/filepath-securejoin v0.2.1
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65
github.com/docker/docker-credential-helpers v0.6.1 // indirect github.com/docker/docker-credential-helpers v0.6.1 // indirect

View File

@ -49,6 +49,8 @@ github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50Dd
github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c=
github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ= github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ=
github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd+yQI=
github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo= github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo=
github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View File

@ -6,7 +6,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -15,13 +14,11 @@ import (
const ( const (
symlinkChrootedCommand = "chrootsymlinks-resolve" symlinkChrootedCommand = "chrootsymlinks-resolve"
symlinkModifiedTime = "modtimesymlinks-resolve"
maxSymlinksResolved = 40 maxSymlinksResolved = 40
) )
func init() { func init() {
reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks) reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
} }
// resolveSymlink uses a child subprocess to resolve any symlinks in filename // resolveSymlink uses a child subprocess to resolve any symlinks in filename
@ -71,118 +68,6 @@ func resolveChrootedSymlinks() {
os.Exit(status) os.Exit(status)
} }
// main() for grandparent subprocess. Its main job is to shuttle stdio back
// and forth, managing a pseudo-terminal if we want one, for our child, the
// parent subprocess.
func resolveSymlinkTimeModified() {
status := 0
flag.Parse()
if len(flag.Args()) < 1 {
os.Exit(1)
}
// Our first parameter is the directory to chroot into.
if err := unix.Chdir(flag.Arg(0)); err != nil {
fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
os.Exit(1)
}
if err := unix.Chroot(flag.Arg(0)); err != nil {
fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
os.Exit(1)
}
// Our second parameter is the path name to evaluate for symbolic links.
// Our third parameter is the time the cached intermediate image was created.
// We check whether the modified time of the filepath we provide is after the time the cached image was created.
timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2))
if err != nil {
fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err)
os.Exit(1)
}
if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil {
fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
os.Exit(1)
}
os.Exit(status)
}
// resolveModifiedTime (in the grandparent process) checks filename for any symlinks,
// resolves it and compares the modified time of the file with historyTime, which is
// the creation time of the cached image. It returns true if filename was modified after
// historyTime, otherwise returns false.
func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
// The child process expects a chroot and one path that
// will be consulted relative to the chroot directory and evaluated
// for any symbolic links present.
cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime)
output, err := cmd.CombinedOutput()
if err != nil {
return false, errors.Wrapf(err, string(output))
}
// Hand back true/false depending on in the file was modified after the caches image was created.
return string(output) == "true", nil
}
// modTimeIsGreater goes through the files added/copied in using the Dockerfile and
// checks the time stamp (follows symlinks) with the time stamp of when the cached
// image was created. IT compares the two and returns true if the file was modified
// after the cached image was created, otherwise it returns false.
func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
var timeIsGreater bool
// Convert historyTime from string to time.Time for comparison
histTime, err := time.Parse(time.RFC3339Nano, historyTime)
if err != nil {
return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
}
// Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir)
relPath, err := filepath.Rel(rootdir, path)
if err != nil {
return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir)
}
// Walk the file tree and check the time stamps.
err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error {
// If using cached images, it is possible for files that are being copied to come from
// previous build stages. But if using cached images, then the copied file won't exist
// since a container won't have been created for the previous build stage and info will be nil.
// In that case just return nil and continue on with using the cached image for the whole build process.
if info == nil {
return nil
}
modTime := info.ModTime()
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
// Evaluate any symlink that occurs to get updated modified information
resolvedPath, err := filepath.EvalSymlinks(path)
if err != nil && os.IsNotExist(err) {
return errors.Wrapf(errDanglingSymlink, "%q", path)
}
if err != nil {
return errors.Wrapf(err, "error evaluating symlink %q", path)
}
fileInfo, err := os.Stat(resolvedPath)
if err != nil {
return errors.Wrapf(err, "error getting file info %q", resolvedPath)
}
modTime = fileInfo.ModTime()
}
if modTime.After(histTime) {
timeIsGreater = true
return nil
}
return nil
})
if err != nil {
// if error is due to dangling symlink, ignore error and return nil
if errors.Cause(err) == errDanglingSymlink {
return false, nil
}
return false, errors.Wrapf(err, "error walking file tree %q", path)
}
return timeIsGreater, err
}
// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear. // getSymbolic link goes through each part of the path and continues resolving symlinks as they appear.
// Returns what the whole target path for what "path" resolves to. // Returns what the whole target path for what "path" resolves to.
func getSymbolicLink(path string) (string, error) { func getSymbolicLink(path string) (string, error) {

View File

@ -1,7 +0,0 @@
package imagebuildah
import "errors"
var (
errDanglingSymlink = errors.New("error evaluating dangling symlink")
)

View File

@ -42,8 +42,8 @@ var builtinAllowedBuildArgs = map[string]bool{
} }
// Executor is a buildah-based implementation of the imagebuilder.Executor // Executor is a buildah-based implementation of the imagebuilder.Executor
// interface. It coordinates the entire build by using one StageExecutors to // interface. It coordinates the entire build by using one or more
// handle each stage of the build. // StageExecutors to handle each stage of the build.
type Executor struct { type Executor struct {
stages map[string]*StageExecutor stages map[string]*StageExecutor
store storage.Store store storage.Store
@ -248,26 +248,36 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi
return oci.History, nil return oci.History, nil
} }
// getCreatedBy returns the command the image at node will be created by. // getCreatedBy returns the command the image at node will be created by. If
func (b *Executor) getCreatedBy(node *parser.Node) string { // the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
func (b *Executor) getCreatedBy(node *parser.Node, addedContentDigest string) string {
if node == nil { if node == nil {
return "/bin/sh" return "/bin/sh"
} }
if node.Value == "run" { switch strings.ToUpper(node.Value) {
case "RUN":
buildArgs := b.getBuildArgs() buildArgs := b.getBuildArgs()
if buildArgs != "" { if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
} }
return "/bin/sh -c " + node.Original[4:] return "/bin/sh -c " + node.Original[4:]
case "ADD", "COPY":
destination := node
for destination.Next != nil {
destination = destination.Next
} }
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original return "/bin/sh -c #(nop) " + node.Original
} }
}
// historyMatches returns true if a candidate history matches the history of our // historyMatches returns true if a candidate history matches the history of our
// base image (if we have one), plus the current instruction. // base image (if we have one), plus the current instruction.
// Used to verify whether a cache of the intermediate image exists and whether // Used to verify whether a cache of the intermediate image exists and whether
// to run the build again. // to run the build again.
func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History) bool { func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool {
if len(baseHistory) >= len(history) { if len(baseHistory) >= len(history) {
return false return false
} }
@ -297,7 +307,7 @@ func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node,
return false return false
} }
} }
return history[len(baseHistory)].CreatedBy == b.getCreatedBy(child) return history[len(baseHistory)].CreatedBy == b.getCreatedBy(child, addedContentDigest)
} }
// getBuildArgs returns a string of the build-args specified during the build process // getBuildArgs returns a string of the build-args specified during the build process
@ -406,13 +416,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// arg expansion, so if the previous stage // arg expansion, so if the previous stage
// was named using argument values, we might // was named using argument values, we might
// not record the right value here. // not record the right value here.
rootfs := flag[7:] rootfs := strings.TrimPrefix(flag, "--from=")
b.rootfsMap[rootfs] = true b.rootfsMap[rootfs] = true
logrus.Debugf("rootfs: %q", rootfs) logrus.Debugf("rootfs: %q", rootfs)
} }
} }
} }
break
} }
node = node.Next // next line node = node.Next // next line
} }

View File

@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net/http"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -249,9 +248,112 @@ func (s *StageExecutor) volumeCacheRestore() error {
return nil return nil
} }
// digestSpecifiedContent digests any content that this next instruction would
// add to the image, returning the formatted digest string if there is any, or
// the empty string otherwise. We don't care about the details of where in the
// filesystem the content actually goes, because we're not actually going to
// add it here, so this is less involved than Copy().
func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error) {
	// No instruction: done.
	if node == nil {
		return "", nil
	}
	// Not adding content: done. Only ADD and COPY bring content into the image.
	switch strings.ToUpper(node.Value) {
	default:
		return "", nil
	case "ADD", "COPY":
	}
	// Pull out everything except the first node (the instruction) and the
	// last node (the destination): the middle nodes are the sources.
	var srcs []string
	destination := node
	for destination.Next != nil {
		destination = destination.Next
		if destination.Next != nil {
			srcs = append(srcs, destination.Value)
		}
	}
	var sources []string
	var idMappingOptions *buildah.IDMappingOptions
	contextDir := s.executor.contextDir
	for _, flag := range node.Flags {
		if strings.HasPrefix(flag, "--from=") {
			// Flag says to read the content from another
			// container. Update the ID mappings and
			// all-content-comes-from-below-this-directory value.
			from := strings.TrimPrefix(flag, "--from=")
			if other, ok := s.executor.stages[from]; ok {
				contextDir = other.mountPoint
				idMappingOptions = &other.builder.IDMappingOptions
			} else if builder, ok := s.executor.containerMap[from]; ok {
				contextDir = builder.MountPoint
				idMappingOptions = &builder.IDMappingOptions
			} else {
				return "", errors.Errorf("the stage %q has not been built", from)
			}
		}
	}
	for _, src := range srcs {
		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
			// Source is a URL. TODO: cache this content
			// somewhere, so that we can avoid pulling it down
			// again if we end up needing to drop it into the
			// filesystem.
			sources = append(sources, src)
		} else {
			// Source is not a URL, so it's a location relative to
			// the all-content-comes-from-below-this-directory
			// directory. SecureJoin keeps symlinks from escaping
			// contextDir.
			contextSrc, err := securejoin.SecureJoin(contextDir, src)
			if err != nil {
				return "", errors.Wrapf(err, "error joining %q and %q", contextDir, src)
			}
			sources = append(sources, contextSrc)
		}
	}
	// If the all-content-comes-from-below-this-directory is the build
	// context, read its .dockerignore so the dry run skips the same files
	// a real copy would skip.
	var excludes []string
	if contextDir == s.executor.contextDir {
		var err error
		if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
			return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
		}
	}
	// Restart the digester and have it do a dry-run copy to compute the
	// digest information. DryRun means nothing is written to the rootfs;
	// the builder only feeds the content through the digester.
	options := buildah.AddAndCopyOptions{
		Excludes:         excludes,
		ContextDir:       contextDir,
		IDMappingOptions: idMappingOptions,
		DryRun:           true,
	}
	s.builder.ContentDigester.Restart()
	download := strings.ToUpper(node.Value) == "ADD"
	err := s.builder.Add(destination.Value, download, options, sources...)
	if err != nil {
		return "", errors.Wrapf(err, "error dry-running %q", node.Original)
	}
	// Return the formatted version of the digester's result, e.g.
	// "prefix:encoded-digest", or "" if the digest doesn't validate.
	contentDigest := ""
	prefix, digest := s.builder.ContentDigester.Digest()
	if prefix != "" {
		prefix += ":"
	}
	if digest.Validate() == nil {
		contentDigest = prefix + digest.Encoded()
	}
	return contentDigest, nil
}
// Copy copies data into the working tree. The "Download" field is how // Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY" // imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies { for _, copy := range copies {
// Check the file and see if part of it is a symlink. // Check the file and see if part of it is a symlink.
// Convert it to the target if so. To be ultrasafe // Convert it to the target if so. To be ultrasafe
@ -283,34 +385,44 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
if err := s.volumeCacheInvalidate(copy.Dest); err != nil { if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err return err
} }
sources := []string{} var sources []string
for _, src := range copy.Src { // The From field says to read the content from another
contextDir := s.executor.contextDir // container. Update the ID mappings and
copyExcludes := excludes // all-content-comes-from-below-this-directory value.
var idMappingOptions *buildah.IDMappingOptions var idMappingOptions *buildah.IDMappingOptions
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { var copyExcludes []string
sources = append(sources, src) contextDir := s.executor.contextDir
} else if len(copy.From) > 0 { if len(copy.From) > 0 {
var srcRoot string
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index { if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
srcRoot = other.mountPoint
contextDir = other.mountPoint contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok { } else if builder, ok := s.executor.containerMap[copy.From]; ok {
srcRoot = builder.MountPoint
contextDir = builder.MountPoint contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions idMappingOptions = &builder.IDMappingOptions
} else { } else {
return errors.Errorf("the stage %q has not been built", copy.From) return errors.Errorf("the stage %q has not been built", copy.From)
} }
srcSecure, err := securejoin.SecureJoin(srcRoot, src) copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL.
sources = append(sources, src)
} else {
// Treat the source, which is not a URL, as a
// location relative to the
// all-content-comes-from-below-this-directory
// directory.
srcSecure, err := securejoin.SecureJoin(contextDir, src)
if err != nil { if err != nil {
return err return err
} }
if hadFinalPathSeparator {
// If destination is a folder, we need to take extra care to // If destination is a folder, we need to take extra care to
// ensure that files are copied with correct names (since // ensure that files are copied with correct names (since
// resolving a symlink may result in a different name). // resolving a symlink may result in a different name).
if hadFinalPathSeparator {
_, srcName := filepath.Split(src) _, srcName := filepath.Split(src)
_, srcNameSecure := filepath.Split(srcSecure) _, srcNameSecure := filepath.Split(srcSecure)
if srcName != srcNameSecure { if srcName != srcNameSecure {
@ -318,6 +430,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
Chown: copy.Chown, Chown: copy.Chown,
ContextDir: contextDir, ContextDir: contextDir,
Excludes: copyExcludes, Excludes: copyExcludes,
IDMappingOptions: idMappingOptions,
} }
if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil { if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
return err return err
@ -326,10 +439,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
} }
} }
sources = append(sources, srcSecure) sources = append(sources, srcSecure)
}
} else {
sources = append(sources, filepath.Join(s.executor.contextDir, src))
copyExcludes = append(s.executor.excludes, excludes...)
} }
options := buildah.AddAndCopyOptions{ options := buildah.AddAndCopyOptions{
Chown: copy.Chown, Chown: copy.Chown,
@ -341,7 +451,6 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
return err return err
} }
} }
}
return nil return nil
} }
@ -645,7 +754,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// squash the contents of the base image. Whichever is // squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image. // the case, we need to commit() to create a new image.
logCommit(s.output, -1) logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil), false, s.output); err != nil { if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil, ""), false, s.output); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container") return "", nil, errors.Wrapf(err, "error committing base container")
} }
} else { } else {
@ -711,13 +820,18 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
// In case we added content, retrieve its digest.
addedContentDigest, err := s.digestSpecifiedContent(node)
if err != nil {
return "", nil, err
}
if moreInstructions { if moreInstructions {
// There are still more instructions to process // There are still more instructions to process
// for this stage. Make a note of the // for this stage. Make a note of the
// instruction in the history that we'll write // instruction in the history that we'll write
// for the image when we eventually commit it. // for the image when we eventually commit it.
now := time.Now() now := time.Now()
s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node), "", "") s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node, addedContentDigest), "", "")
continue continue
} else { } else {
// This is the last instruction for this stage, // This is the last instruction for this stage,
@ -726,7 +840,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// if it's used as the basis for a later stage. // if it's used as the basis for a later stage.
if lastStage || imageIsUsedLater { if lastStage || imageIsUsedLater {
logCommit(s.output, i) logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), false, s.output) imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), false, s.output)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
} }
@ -756,7 +870,11 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// cached images so far, look for one that matches what we // cached images so far, look for one that matches what we
// expect to produce for this instruction. // expect to produce for this instruction.
if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) { if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
cacheID, err = s.layerExists(ctx, node) addedContentDigest, err := s.digestSpecifiedContent(node)
if err != nil {
return "", nil, err
}
cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
if err != nil { if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
} }
@ -809,9 +927,14 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
// In case we added content, retrieve its digest.
addedContentDigest, err := s.digestSpecifiedContent(node)
if err != nil {
return "", nil, err
}
// Create a new image, maybe with a new layer. // Create a new image, maybe with a new layer.
logCommit(s.output, i) logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), !s.stepRequiresLayer(step), commitName) imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
} }
@ -899,9 +1022,9 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
return img.ID, ref, nil return img.ID, ref, nil
} }
// layerExists returns true if an intermediate image of currNode exists in the image store from a previous build. // intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history. // It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node) (string, error) { func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) {
// Get the list of images available in the image store // Get the list of images available in the image store
images, err := s.executor.store.Images() images, err := s.executor.store.Images()
if err != nil { if err != nil {
@ -932,85 +1055,14 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node)
return "", errors.Wrapf(err, "error getting history of %q", image.ID) return "", errors.Wrapf(err, "error getting history of %q", image.ID)
} }
// children + currNode is the point of the Dockerfile we are currently at. // children + currNode is the point of the Dockerfile we are currently at.
if s.executor.historyMatches(baseHistory, currNode, history) { if s.executor.historyMatches(baseHistory, currNode, history, addedContentDigest) {
// This checks if the files copied during build have been changed if the node is
// a COPY or ADD command.
filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created)
if err != nil {
return "", errors.Wrapf(err, "error checking if copied files match")
}
if filesMatch {
return image.ID, nil return image.ID, nil
} }
} }
} }
}
return "", nil return "", nil
} }
// getFilesToCopy goes through node to get all the src files that are copied,
// added or downloaded by an ADD/COPY instruction. It is possible for the
// Dockerfile to have src as hom*, which means all files that have hom as a
// prefix. Another format is hom?.txt, which means all files that have that
// name format with the ? replaced by another character.
//
// Returns the expanded list of sources; URL sources are passed through
// unexpanded.
func (s *StageExecutor) getFilesToCopy(node *parser.Node) ([]string, error) {
	currNode := node.Next
	var src []string
	// Walk the argument list, stopping before the final node (the
	// destination). Guard against a nil currNode so a malformed
	// instruction with no arguments can't cause a nil dereference.
	for currNode != nil && currNode.Next != nil {
		if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") {
			// URLs are downloaded, not globbed.
			src = append(src, currNode.Value)
			currNode = currNode.Next
			continue
		}
		// Expand shell-style patterns relative to the copy source root.
		matches, err := filepath.Glob(filepath.Join(s.copyFrom, currNode.Value))
		if err != nil {
			return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
		}
		src = append(src, matches...)
		currNode = currNode.Next
	}
	return src, nil
}
// copiedFilesMatch checks to see if the node instruction is a COPY or ADD.
// If it is either of those two it checks the timestamps on all the files
// copied/added by the dockerfile. If the host version has a time stamp
// greater than the time stamp of the build, the build will not use the
// cached version and will rebuild.
func (s *StageExecutor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) {
	// Instructions other than ADD/COPY bring no files into the image, so
	// there is nothing that could have changed.
	switch node.Value {
	case "add", "copy":
	default:
		return true, nil
	}
	copied, err := s.getFilesToCopy(node)
	if err != nil {
		return false, err
	}
	for _, item := range copied {
		isURL := strings.HasPrefix(item, "http://") || strings.HasPrefix(item, "https://")
		if isURL {
			// For urls, check the Last-Modified field in the header.
			modified, err := urlContentModified(item, historyTime)
			if err != nil {
				return false, err
			}
			if modified {
				return false, nil
			}
			continue
		}
		// Walks the file tree for local files and uses chroot to ensure we
		// don't escape out of the allowed path when resolving any symlinks.
		// Change the time format to ensure we don't run into a parsing error
		// when converting again from string to time.Time. It is a known Go
		// issue that the conversions cause errors sometimes, so specifying a
		// particular time format here when converting to a string.
		newer, err := resolveModifiedTime(s.copyFrom, item, historyTime.Format(time.RFC3339Nano))
		if err != nil {
			return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
		}
		if newer {
			return false, nil
		}
	}
	return true, nil
}
// commit writes the container's contents to an image, using a passed-in tag as // commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise. // the name if there is one, generating a unique ID-based one otherwise.
func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) { func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
@ -1134,23 +1186,3 @@ func (s *StageExecutor) EnsureContainerPath(path string) error {
} }
return nil return nil
} }
// urlContentModified sends a get request to the url and checks if the header
// has a value in Last-Modified, and if it does compares the time stamp to
// that of the history of the cached image.
// Returns true (i.e. "content may be newer, rebuild") if the response is not
// a success, or if there is no Last-Modified value in the header — in both
// cases we cannot prove the cached copy is still current.
func urlContentModified(url string, historyTime *time.Time) (bool, error) {
	resp, err := http.Get(url)
	if err != nil {
		return false, errors.Wrapf(err, "error getting %q", url)
	}
	defer resp.Body.Close()
	// A non-2xx response (error page, redirect target missing, etc.)
	// carries no trustworthy metadata about the content, so be
	// conservative and force a rebuild rather than trusting its headers.
	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
		logrus.Debugf("Got status code %d for %q, will rebuild.", resp.StatusCode, url)
		return true, nil
	}
	if lastModified := resp.Header.Get("Last-Modified"); lastModified != "" {
		lastModifiedTime, err := time.Parse(time.RFC1123, lastModified)
		if err != nil {
			return false, errors.Wrapf(err, "error parsing time for %q", url)
		}
		// Rebuild only when the remote content is newer than the
		// cached layer's creation time.
		return lastModifiedTime.After(*historyTime), nil
	}
	logrus.Debugf("Response header did not have Last-Modified %q, will rebuild.", url)
	return true, nil
}

View File

@ -385,3 +385,14 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a
* `make` * `make`
* `make install` * `make install`
* Then add any updated or added files with `git add` then do a `git commit` and create a PR. * Then add any updated or added files with `git add` then do a `git commit` and create a PR.
### Vendor from your own fork
If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example):
* `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH`
* `make vendor`
To revert:
* `go mod edit -dropreplace github.com/containers/storage`
* `make vendor`

View File

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
tmpdir="$PWD/tmp.$RANDOM" tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir" mkdir -p "$tmpdir"
trap 'rm -fr "$tmpdir"' EXIT trap 'rm -fr "$tmpdir"' EXIT

View File

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
if pkg-config ostree-1 2> /dev/null ; then if pkg-config ostree-1 2> /dev/null ; then
echo containers_image_ostree echo containers_image_ostree
else else

View File

@ -186,7 +186,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.") fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains") fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers") fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options") fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables") fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")

View File

@ -462,25 +462,40 @@ func ValidateVolumeCtrDir(ctrDir string) error {
// ValidateVolumeOpts validates a volume's options // ValidateVolumeOpts validates a volume's options
func ValidateVolumeOpts(options []string) ([]string, error) { func ValidateVolumeOpts(options []string) ([]string, error) {
var foundRootPropagation, foundRWRO, foundLabelChange, bindType int var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid int
finalOpts := make([]string, 0, len(options)) finalOpts := make([]string, 0, len(options))
for _, opt := range options { for _, opt := range options {
switch opt { switch opt {
case "noexec", "exec":
foundExec++
if foundExec > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", "))
}
case "nodev", "dev":
foundDev++
if foundDev > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", "))
}
case "nosuid", "suid":
foundSuid++
if foundSuid > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", "))
}
case "rw", "ro": case "rw", "ro":
foundRWRO++
if foundRWRO > 1 { if foundRWRO > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", ")) return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", "))
} }
foundRWRO++
case "z", "Z", "O": case "z", "Z", "O":
foundLabelChange++
if foundLabelChange > 1 { if foundLabelChange > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", ")) return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
} }
foundLabelChange++
case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable": case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
foundRootPropagation++
if foundRootPropagation > 1 { if foundRootPropagation > 1 {
return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", ")) return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", "))
} }
foundRootPropagation++
case "bind", "rbind": case "bind", "rbind":
bindType++ bindType++
if bindType > 1 { if bindType > 1 {

View File

@ -102,19 +102,11 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
} }
case directory.Transport.Name(): case directory.Transport.Name():
// supports pull from a directory // supports pull from a directory
name = srcRef.StringWithinTransport() name = toLocalImageName(srcRef.StringWithinTransport())
// remove leading "/"
if name[:1] == "/" {
name = name[1:]
}
case oci.Transport.Name(): case oci.Transport.Name():
// supports pull from a directory // supports pull from a directory
split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2) split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2)
name = split[0] name = toLocalImageName(split[0])
// remove leading "/"
if name[:1] == "/" {
name = name[1:]
}
default: default:
ref := srcRef.DockerReference() ref := srcRef.DockerReference()
if ref == nil { if ref == nil {
@ -287,3 +279,8 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.Sys
} }
return "@" + digest.Hex(), nil return "@" + digest.Hex(), nil
} }
// toLocalImageName converts an image name into a 'localhost/' prefixed one,
// stripping any leading slashes from the input first.
func toLocalImageName(imageName string) string {
	trimmed := imageName
	for strings.HasPrefix(trimmed, "/") {
		trimmed = trimmed[1:]
	}
	return "localhost/" + trimmed
}

View File

@ -343,7 +343,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
net := namespaceOptions.Find(string(specs.NetworkNamespace)) net := namespaceOptions.Find(string(specs.NetworkNamespace))
hostNetwork := net == nil || net.Host hostNetwork := net == nil || net.Host
user := namespaceOptions.Find(string(specs.UserNamespace)) user := namespaceOptions.Find(string(specs.UserNamespace))
hostUser := user == nil || user.Host hostUser := (user == nil || user.Host) && !unshare.IsRootless()
// Copy mounts from the generated list. // Copy mounts from the generated list.
mountCgroups := true mountCgroups := true
@ -431,7 +431,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the // Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there. // volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil, nil) copyWithTar := b.copyWithTar(nil, nil, nil, false)
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID)) builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil { if err != nil {
return err return err

View File

@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
if pkg-config libselinux 2> /dev/null ; then if pkg-config libselinux 2> /dev/null ; then
echo selinux echo selinux
fi fi

View File

@ -3,6 +3,7 @@ package buildah
import ( import (
"archive/tar" "archive/tar"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
@ -112,24 +113,23 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
// of any container, or another container, into our working container, mapping // of any container, or another container, into our working container, mapping
// read permissions using the passed-in ID maps, writing using the container's // read permissions using the passed-in ID maps, writing using the container's
// ID mappings, possibly overridden using the passed-in chownOpts // ID mappings, possibly overridden using the passed-in chownOpts
func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
if tarIDMappingOptions == nil { if tarIDMappingOptions == nil {
tarIDMappingOptions = &IDMappingOptions{ tarIDMappingOptions = &IDMappingOptions{
HostUIDMapping: true, HostUIDMapping: true,
HostGIDMapping: true, HostGIDMapping: true,
} }
} }
var hardlinkChecker util.HardlinkChecker
return func(src, dest string) error { return func(src, dest string) error {
var f *os.File
logrus.Debugf("copyFileWithTar(%s, %s)", src, dest) logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
f, err := os.Open(src) fi, err := os.Lstat(src)
if err != nil { if err != nil {
return errors.Wrapf(err, "error opening %q to copy its contents", src) return errors.Wrapf(err, "error reading attributes of %q", src)
} }
defer func() {
if f != nil {
f.Close()
}
}()
sysfi, err := system.Lstat(src) sysfi, err := system.Lstat(src)
if err != nil { if err != nil {
@ -143,19 +143,45 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp
return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID) return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
} }
fi, err := os.Lstat(src)
if err != nil {
return errors.Wrapf(err, "error reading attributes of %q", src)
}
hdr, err := tar.FileInfoHeader(fi, filepath.Base(src)) hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
if err != nil { if err != nil {
return errors.Wrapf(err, "error generating tar header for: %q", src) return errors.Wrapf(err, "error generating tar header for: %q", src)
} }
hdr.Name = filepath.Base(dest) chrootedDest, err := filepath.Rel(b.MountPoint, dest)
if err != nil {
return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest)
}
hdr.Name = chrootedDest
hdr.Uid = int(containerUID) hdr.Uid = int(containerUID)
hdr.Gid = int(containerGID) hdr.Gid = int(containerGID)
if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg {
if linkname := hardlinkChecker.Check(fi); linkname != "" {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = linkname
} else {
hardlinkChecker.Add(fi, chrootedDest)
f, err = os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q to copy its contents", src)
}
defer func() {
if err := f.Close(); err != nil {
logrus.Debugf("error closing %s: %v", fi.Name(), err)
}
}()
}
}
if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink {
hdr.Typeflag = tar.TypeSymlink
linkName, err := os.Readlink(src)
if err != nil {
return errors.Wrapf(err, "error reading destination from symlink %q", src)
}
hdr.Linkname = linkName
}
pipeReader, pipeWriter := io.Pipe() pipeReader, pipeWriter := io.Pipe()
writer := tar.NewWriter(pipeWriter) writer := tar.NewWriter(pipeWriter)
var copyErr error var copyErr error
@ -165,26 +191,25 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp
logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err) logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
copyErr = err copyErr = err
} }
if srcFile != nil {
n, err := pools.Copy(writer, srcFile) n, err := pools.Copy(writer, srcFile)
if n != hdr.Size { if n != hdr.Size {
logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n) logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
} }
if err != nil { if err != nil {
logrus.Debugf("error reading %s: %v", srcFile.Name(), err) logrus.Debugf("error copying contents of %s: %v", fi.Name(), err)
copyErr = err copyErr = err
} }
if err = writer.Close(); err != nil {
logrus.Debugf("error closing write pipe for %s: %v", srcFile.Name(), err)
} }
if err = srcFile.Close(); err != nil { if err = writer.Close(); err != nil {
logrus.Debugf("error closing %s: %v", srcFile.Name(), err) logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err)
} }
pipeWriter.Close() pipeWriter.Close()
pipeWriter = nil pipeWriter = nil
}(f) }(f)
untar := b.untar(chownOpts, hasher) untar := b.untar(chownOpts, hasher, dryRun)
err = untar(pipeReader, filepath.Dir(dest)) err = untar(pipeReader, b.MountPoint)
if err == nil { if err == nil {
err = copyErr err = copyErr
} }
@ -200,10 +225,17 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp
// our container or from another container, into our working container, mapping // our container or from another container, into our working container, mapping
// permissions at read-time using the container's ID maps, with ownership at // permissions at read-time using the container's ID maps, with ownership at
// write-time possibly overridden using the passed-in chownOpts // write-time possibly overridden using the passed-in chownOpts
func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
tar := b.tarPath(tarIDMappingOptions) tar := b.tarPath(tarIDMappingOptions)
untar := b.untar(chownOpts, hasher)
return func(src, dest string) error { return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
untar := b.untar(chownOpts, thisHasher, dryRun)
rc, err := tar(src) rc, err := tar(src)
if err != nil { if err != nil {
return errors.Wrapf(err, "error archiving %q for copy", src) return errors.Wrapf(err, "error archiving %q for copy", src)
@ -215,8 +247,28 @@ func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *
// untarPath returns a function which extracts an archive in a specified // untarPath returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the // location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts // container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
if hasher != nil && b.ContentDigester.Hash() != nil {
hasher = io.MultiWriter(hasher, b.ContentDigester.Hash())
}
if hasher == nil {
hasher = b.ContentDigester.Hash()
}
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
if dryRun {
return func(src, dest string) error {
if hasher == nil {
hasher = ioutil.Discard
}
f, err := os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q", src)
}
defer f.Close()
_, err = io.Copy(hasher, f)
return err
}
}
return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
} }
@ -248,7 +300,7 @@ func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string)
// untar returns a function which extracts an archive stream to a specified // untar returns a function which extracts an archive stream to a specified
// location in the container's filesystem, mapping permissions using the // location in the container's filesystem, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts // container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArchive io.ReadCloser, dest string) error { func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
options := &archive.TarOptions{ options := &archive.TarOptions{
@ -257,14 +309,31 @@ func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArc
ChownOpts: chownOpts, ChownOpts: chownOpts,
} }
untar := chrootarchive.Untar untar := chrootarchive.Untar
if hasher != nil { if dryRun {
originalUntar := untar
untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil {
return errors.Wrapf(err, "error digesting tar stream")
} }
return nil
}
}
originalUntar := untar
untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error {
reader := tarArchive
if untarHasher != nil {
reader = io.TeeReader(tarArchive, untarHasher)
}
return originalUntar(reader, dest, options)
} }
return func(tarArchive io.ReadCloser, dest string) error { return func(tarArchive io.ReadCloser, dest string) error {
err := untar(tarArchive, dest, options) thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
err := untarWithHasher(tarArchive, dest, options, thisHasher)
if err2 := tarArchive.Close(); err2 != nil { if err2 := tarArchive.Close(); err2 != nil {
if err == nil { if err == nil {
err = err2 err = err2

View File

@ -0,0 +1,14 @@
// +build darwin
package util
import (
"syscall"
)
// makeHardlinkDeviceAndInode builds the map key that identifies the file
// described by st, converting both the device and inode fields to uint64.
func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
	return hardlinkDeviceAndInode{device: uint64(st.Dev), inode: uint64(st.Ino)}
}

View File

@ -0,0 +1,14 @@
// +build linux
package util
import (
"syscall"
)
// makeHardlinkDeviceAndInode builds the map key that identifies the file
// described by st; on Linux, Dev and Ino are already uint64, so no
// conversion is needed.
func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
	return hardlinkDeviceAndInode{device: st.Dev, inode: st.Ino}
}

31
vendor/github.com/containers/buildah/util/util_unix.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// +build linux darwin
package util
import (
"os"
"sync"
"syscall"
)
// hardlinkDeviceAndInode is a comparable (device, inode) pair used as a
// map key to uniquely identify a file across multiple hardlinked names.
type hardlinkDeviceAndInode struct {
	device, inode uint64
}
// HardlinkChecker remembers the names of multiply-linked regular files,
// keyed by device and inode, so that later links to the same file can be
// detected. Backed by a sync.Map, so it is safe for concurrent use.
type HardlinkChecker struct {
	// hardlinks maps hardlinkDeviceAndInode to a previously recorded
	// name (string) for that file.
	hardlinks sync.Map
}
// Check returns a name previously recorded (via Add) for the same
// device/inode as fi, or "" when fi is not a regular file with more than
// one link, or when no non-empty name has been recorded for it.
func (h *HardlinkChecker) Check(fi os.FileInfo) string {
	st, isStat := fi.Sys().(*syscall.Stat_t)
	if !isStat || !fi.Mode().IsRegular() || st.Nlink <= 1 {
		return ""
	}
	if v, found := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); found {
		if name := v.(string); name != "" {
			return name
		}
	}
	return ""
}
// Add records name as a known path for fi's device/inode, but only when
// fi is a regular file with more than one link; other files are ignored.
func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
	st, isStat := fi.Sys().(*syscall.Stat_t)
	if !isStat || !fi.Mode().IsRegular() || st.Nlink <= 1 {
		return
	}
	h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
}

View File

@ -0,0 +1,16 @@
// +build !linux,!darwin
package util
import (
"os"
)
// HardlinkChecker is the stub implementation used on platforms where
// device/inode information is not available from os.FileInfo, so no
// hardlink bookkeeping is possible.
type HardlinkChecker struct{}

// Check always returns "": no previously seen link is ever recorded here.
func (h *HardlinkChecker) Check(fi os.FileInfo) string {
	return ""
}

// Add is a no-op on these platforms.
func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
}

2
vendor/modules.txt vendored
View File

@ -45,7 +45,7 @@ github.com/containernetworking/cni/pkg/version
github.com/containernetworking/cni/pkg/types/020 github.com/containernetworking/cni/pkg/types/020
# github.com/containernetworking/plugins v0.8.1 # github.com/containernetworking/plugins v0.8.1
github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.10.1 # github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142
github.com/containers/buildah github.com/containers/buildah
github.com/containers/buildah/imagebuildah github.com/containers/buildah/imagebuildah
github.com/containers/buildah/pkg/chrootuser github.com/containers/buildah/pkg/chrootuser