Update c/storage to 9b10041d7b2ef767ce9c42b5862b6c51eeb82214

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
Author: Matthew Heon
Date:   2019-06-07 15:11:38 -04:00
Parent: bcc89e9d08
Commit: d81fc2e192
18 changed files with 257 additions and 59 deletions

View File

@@ -19,7 +19,7 @@ github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145
 github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
-github.com/containers/storage v1.12.7
+github.com/containers/storage 9b10041d7b2ef767ce9c42b5862b6c51eeb82214
 github.com/containers/psgo v1.3.0
 github.com/coreos/go-systemd v17
 github.com/coreos/pkg v4

View File

@@ -572,6 +572,10 @@ func (r *containerStore) Lock() {
 	r.lockfile.Lock()
 }
 
+func (r *containerStore) RecursiveLock() {
+	r.lockfile.RecursiveLock()
+}
+
 func (r *containerStore) RLock() {
 	r.lockfile.RLock()
 }

View File

@@ -255,6 +255,9 @@ func (a *Driver) AdditionalImageStores() []string {
 // CreateFromTemplate creates a layer with the same contents and parent as another layer.
 func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	if opts == nil {
+		opts = &graphdriver.CreateOpts{}
+	}
 	return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
 }
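The nil check matters because NaiveCreateFromTemplate forwards opts to the generic create path, and callers may legitimately pass nil. A minimal standalone sketch of the same guard, using stand-in types rather than the real graphdriver ones:

package main

import "fmt"

// CreateOpts stands in for graphdriver.CreateOpts; this is a sketch of the
// guard, not the real driver code.
type CreateOpts struct {
	StorageOpt map[string]string
}

func createFromTemplate(id string, opts *CreateOpts) error {
	// Same defensive default as the change above: never hand a nil
	// options pointer to the shared create path.
	if opts == nil {
		opts = &CreateOpts{}
	}
	return naiveCreate(id, opts)
}

func naiveCreate(id string, opts *CreateOpts) error {
	// Safe even when the caller passed nil to createFromTemplate.
	fmt.Printf("creating %s with %d storage options\n", id, len(opts.StorageOpt))
	return nil
}

func main() {
	_ = createFromTemplate("layer1", nil)
}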

View File

@@ -55,6 +55,9 @@ func chownByMapsMain() {
 		if err != nil {
 			return fmt.Errorf("error walking to %q: %v", path, err)
 		}
+		if path == "." {
+			return nil
+		}
 		return platformLChown(path, info, toHost, toContainer)
 	}
 	if err := filepath.Walk(".", chown); err != nil {
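filepath.Walk visits the root itself first, under the name ".", so without the new early return the walker would try to chown the directory it was started from. A self-contained sketch of the same skip, standard library only:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	walk := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("error walking to %q: %v", path, err)
		}
		if path == "." {
			// filepath.Walk yields the root itself first; skip it,
			// as the change above does before chowning entries.
			return nil
		}
		fmt.Println("would chown:", path)
		return nil
	}
	if err := filepath.Walk(".", walk); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}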

View File

@@ -82,6 +82,9 @@ type Image struct {
 	// is set before using it.
 	Created time.Time `json:"created,omitempty"`
 
+	// ReadOnly is true if this image resides in a read-only layer store.
+	ReadOnly bool `json:"-"`
+
 	Flags map[string]interface{} `json:"flags,omitempty"`
 }

@@ -159,6 +162,7 @@ func copyImage(i *Image) *Image {
 		BigDataSizes:   copyStringInt64Map(i.BigDataSizes),
 		BigDataDigests: copyStringDigestMap(i.BigDataDigests),
 		Created:        i.Created,
+		ReadOnly:       i.ReadOnly,
 		Flags:          copyStringInterfaceMap(i.Flags),
 	}
 }

@@ -269,6 +273,7 @@ func (r *imageStore) Load() error {
 				list := digests[digest]
 				digests[digest] = append(list, image)
 			}
+			image.ReadOnly = !r.IsReadWrite()
 		}
 	}
 	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {

@@ -739,6 +744,10 @@ func (r *imageStore) Lock() {
 	r.lockfile.Lock()
 }
 
+func (r *imageStore) RecursiveLock() {
+	r.lockfile.RecursiveLock()
+}
+
 func (r *imageStore) RLock() {
 	r.lockfile.RLock()
 }
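The new field is populated only at Load() time (it carries a `json:"-"` tag, so it is never written back to images.json); images that come from an additional, read-only image store end up with ReadOnly set. A hedged sketch of how a caller might consume it: storage.GetStore and Images() are real c/storage APIs, but the paths here are illustrative and running this needs a working storage setup with appropriate permissions.

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.StoreOptions{
		RunRoot:   "/tmp/store-run",   // illustrative; use real roots
		GraphRoot: "/tmp/store-graph", // and a graph driver that exists here
	})
	if err != nil {
		panic(err)
	}
	images, err := store.Images()
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		if img.ReadOnly {
			// Lives in an additional (read-only) image store: writes
			// and deletions against it will fail.
			fmt.Println("read-only image:", img.ID)
		}
	}
}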

View File

@@ -103,6 +103,9 @@ type Layer struct {
 	// for use inside of a user namespace where UID mapping is being used.
 	UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
 	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+
+	// ReadOnly is true if this layer resides in a read-only layer store.
+	ReadOnly bool `json:"-"`
 }
 
 type layerMountPoint struct {

@@ -259,6 +262,7 @@ func copyLayer(l *Layer) *Layer {
 		UncompressedDigest: l.UncompressedDigest,
 		UncompressedSize:   l.UncompressedSize,
 		CompressionType:    l.CompressionType,
+		ReadOnly:           l.ReadOnly,
 		Flags:              copyStringInterfaceMap(l.Flags),
 		UIDMap:             copyIDMap(l.UIDMap),
 		GIDMap:             copyIDMap(l.GIDMap),

@@ -318,6 +322,7 @@ func (r *layerStore) Load() error {
 			if layer.MountLabel != "" {
 				label.ReserveLabel(layer.MountLabel)
 			}
+			layer.ReadOnly = !r.IsReadWrite()
 		}
 		err = nil
 	}

@@ -1304,6 +1309,10 @@ func (r *layerStore) Lock() {
 	r.lockfile.Lock()
 }
 
+func (r *layerStore) RecursiveLock() {
+	r.lockfile.RecursiveLock()
+}
+
 func (r *layerStore) RLock() {
 	r.lockfile.RLock()
 }
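Both new ReadOnly fields rely on the `json:"-"` tag: the flag is recomputed from the store's writability on every Load() and is invisible to the serializer. A small self-contained demonstration of that tag's behavior:

package main

import (
	"encoding/json"
	"fmt"
)

type Layer struct {
	ID       string `json:"id"`
	ReadOnly bool   `json:"-"` // derived at load time, never persisted
}

func main() {
	out, _ := json.Marshal(Layer{ID: "abc", ReadOnly: true})
	fmt.Println(string(out)) // prints {"id":"abc"}; ReadOnly is dropped

	var l Layer
	_ = json.Unmarshal([]byte(`{"id":"abc","ReadOnly":true}`), &l)
	fmt.Println(l.ReadOnly) // false; the tag also hides it from decoding
}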

View File

@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
 
 package storage

View File

@@ -15,6 +15,10 @@ type Locker interface {
 	// Acquire a writer lock.
 	Lock()
 
+	// Acquire a writer lock recursively, allowing for recursive acquisitions
+	// within the same process space.
+	RecursiveLock()
+
 	// Unlock the lock.
 	Unlock()
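The interface addition targets call chains where a function already holding a store's writer lock calls a helper that also takes it; with plain Lock() that self-deadlocks on the in-process mutex. A hedged sketch against the Locker interface, where GetLockfile is the real constructor in this package and the helper is illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

// updateMetadata is an illustrative helper that takes the same lock its
// caller may already hold.
func updateMetadata(lock storage.Locker) {
	lock.RecursiveLock()
	defer lock.Unlock()
	fmt.Println("updated under a possibly nested writer lock")
}

func main() {
	lock, err := storage.GetLockfile("/tmp/example.lock")
	if err != nil {
		panic(err)
	}
	lock.RecursiveLock()
	defer lock.Unlock()
	// With Lock() here and inside the helper, a single process would
	// deadlock on its own rwMutex; RecursiveLock() permits the nesting.
	updateMetadata(lock)
}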

View File

@ -1,20 +0,0 @@
// +build linux solaris
package storage
import (
"time"
"golang.org/x/sys/unix"
)
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtim.Unix())
return when.Before(touched)
}

View File

@ -1,19 +0,0 @@
// +build darwin freebsd
package storage
import (
"time"
"golang.org/x/sys/unix"
)
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtimespec.Unix())
return when.Before(touched)
}

View File

@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/system"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )

@@ -25,6 +26,7 @@ type lockfile struct {
 	locktype  int16
 	locked    bool
 	ro        bool
+	recursive bool
 }
 
 // openLock opens the file at path and returns the corresponding file

@@ -75,7 +77,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
 // lock locks the lockfile via fcntl(2) based on the specified type and
 // command.
-func (l *lockfile) lock(l_type int16) {
+func (l *lockfile) lock(l_type int16, recursive bool) {
 	lk := unix.Flock_t{
 		Type:   l_type,
 		Whence: int16(os.SEEK_SET),

@@ -86,7 +88,13 @@ func (l *lockfile) lock(l_type int16) {
 	case unix.F_RDLCK:
 		l.rwMutex.RLock()
 	case unix.F_WRLCK:
-		l.rwMutex.Lock()
+		if recursive {
+			// NOTE: this is okay because recursive is only set in
+			// RecursiveLock(), so there's no need to protect against
+			// hypothetical RDLCK cases.
+			l.rwMutex.RLock()
+		} else {
+			l.rwMutex.Lock()
+		}
 	default:
 		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
 	}

@@ -110,6 +118,7 @@ func (l *lockfile) lock(l_type int16) {
 	}
 	l.locktype = l_type
 	l.locked = true
+	l.recursive = recursive
 	l.counter++
 }

@@ -119,13 +128,24 @@ func (l *lockfile) Lock() {
 	if l.ro {
 		l.RLock()
 	} else {
-		l.lock(unix.F_WRLCK)
+		l.lock(unix.F_WRLCK, false)
 	}
 }
 
+// RecursiveLock locks the lockfile as a writer but allows for recursive
+// acquisitions within the same process space. Note that RLock() will be
+// called instead if the lockfile was opened read-only.
+func (l *lockfile) RecursiveLock() {
+	if l.ro {
+		l.RLock()
+	} else {
+		l.lock(unix.F_WRLCK, true)
+	}
+}
+
 // LockRead locks the lockfile as a reader.
 func (l *lockfile) RLock() {
-	l.lock(unix.F_RDLCK)
+	l.lock(unix.F_RDLCK, false)
 }
 
 // Unlock unlocks the lockfile.

@@ -161,7 +181,7 @@ func (l *lockfile) Unlock() {
 		// Close the file descriptor on the last unlock.
 		unix.Close(int(l.fd))
 	}
-	if l.locktype == unix.F_RDLCK {
+	if l.locktype == unix.F_RDLCK || l.recursive {
 		l.rwMutex.RUnlock()
 	} else {
 		l.rwMutex.Unlock()

@@ -232,3 +252,14 @@ func (l *lockfile) Modified() (bool, error) {
 func (l *lockfile) IsReadWrite() bool {
 	return !l.ro
 }
+
+// TouchedSince indicates if the lock file has been touched since the specified time
+func (l *lockfile) TouchedSince(when time.Time) bool {
+	st, err := system.Fstat(int(l.fd))
+	if err != nil {
+		return true
+	}
+	mtim := st.Mtim()
+	touched := time.Unix(mtim.Unix())
+	return when.Before(touched)
+}
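The core trick in lock() is worth spelling out: the fcntl(2) F_WRLCK is per-process, so other processes are still excluded; recursion is only a problem for the in-process rwMutex, and mapping a recursive writer acquisition onto the mutex's shared side makes nested acquisitions from the same process compatible with each other. A minimal model of just that idea, sync primitives only, no fcntl:

package main

import (
	"fmt"
	"sync"
)

// recursiveLock models the rwMutex side of the change: exclusivity toward
// other processes comes from the fcntl F_WRLCK (not modeled here), so
// in-process recursion only has to get past the RWMutex, and taking its
// shared side does exactly that.
type recursiveLock struct {
	mu sync.RWMutex
}

func (l *recursiveLock) RecursiveLock() { l.mu.RLock() }
func (l *recursiveLock) Unlock()        { l.mu.RUnlock() }

func main() {
	var l recursiveLock
	l.RecursiveLock()
	l.RecursiveLock() // nested acquisition in the same process: no deadlock
	fmt.Println("both acquisitions held")
	l.Unlock()
	l.Unlock()
}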

View File

@@ -36,6 +36,12 @@ func (l *lockfile) Lock() {
 	l.locked = true
 }
 
+func (l *lockfile) RecursiveLock() {
+	// We don't support Windows, and a recursive writer lock within one
+	// process space is really just a writer lock, so panic.
+	panic("not supported")
+}
+
 func (l *lockfile) RLock() {
 	l.mu.Lock()
 	l.locked = true

View File

@@ -1,7 +1,7 @@
 package chrootarchive
 
 import (
-	"archive/tar"
+	stdtar "archive/tar"
 	"fmt"
 	"io"
 	"io/ioutil"

@@ -34,18 +34,34 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
 // The archive may be compressed with one of the following algorithms:
 // identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-	return untarHandler(tarArchive, dest, options, true)
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root
+// directory. The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error is returned.
+//
+// `root` should be set to a directory which is not controlled by any
+// potentially malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
 }
 
 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive must be an uncompressed stream.
 func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-	return untarHandler(tarArchive, dest, options, false)
+	return untarHandler(tarArchive, dest, options, false, dest)
 }
 
 // Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
 	if tarArchive == nil {
 		return fmt.Errorf("Empty archive")
 	}

@@ -77,7 +93,15 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 		r = decompressedArchive
 	}
 
-	return invokeUnpack(r, dest, options)
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	return invokePack(srcPath, options, root)
 }
 
 // CopyFileWithTarAndChown returns a function which copies a single file from outside

@@ -99,7 +123,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
 	var hashWorker sync.WaitGroup
 	hashWorker.Add(1)
 	go func() {
-		t := tar.NewReader(contentReader)
+		t := stdtar.NewReader(contentReader)
 		_, err := t.Next()
 		if err != nil {
 			hashError = err
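A hedged usage sketch of the new entry point. The paths are illustrative, and the calling binary must also run reexec.Init() so the re-exec'd storage-untar child is dispatched (see the registration change further down):

package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// Required: the unpack re-execs this binary as storage-untar.
	if reexec.Init() {
		return
	}

	f, err := os.Open("/tmp/layer.tar") // illustrative archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	root := "/var/lib/mystore/ctr-root" // illustrative chroot target
	dest := root + "/srv/data"          // must resolve inside root

	// Plain Untar would resolve symlinks under dest outside any chroot,
	// which is the race behind CVE-2018-15664; UntarWithRoot confines
	// resolution to root instead.
	if err := chrootarchive.UntarWithRoot(f, dest, &archive.TarOptions{}, root); err != nil {
		panic(err)
	}
}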

View File

@@ -10,10 +10,13 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"runtime"
+	"strings"
 
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/reexec"
+	"github.com/pkg/errors"
 )

@@ -23,18 +26,28 @@ func untar() {
 	runtime.LockOSThread()
 	flag.Parse()
 
-	var options *archive.TarOptions
+	var options archive.TarOptions
 
 	//read the options from the pipe "ExtraFiles"
 	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
 		fatal(err)
 	}
 
-	if err := chroot(flag.Arg(0)); err != nil {
+	dst := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = dst
+	}
+
+	if err := chroot(root); err != nil {
 		fatal(err)
 	}
 
-	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+	if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
 		fatal(err)
 	}
 	// fully consume stdin in case it is zero padded

@@ -45,7 +58,10 @@ func untar() {
 	os.Exit(0)
 }
 
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	if root == "" {
+		return errors.New("must specify a root to chroot to")
+	}
 
 	// We can't pass a potentially large exclude list directly via cmd line
 	// because we easily overrun the kernel's max argument/environment size

@@ -57,7 +73,21 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 		return fmt.Errorf("Untar pipe failure: %v", err)
 	}
 
-	cmd := reexec.Command("storage-untar", dest)
+	if root != "" {
+		relDest, err := filepath.Rel(root, dest)
+		if err != nil {
+			return err
+		}
+		if relDest == "." {
+			relDest = "/"
+		}
+		if relDest[0] != '/' {
+			relDest = "/" + relDest
+		}
+		dest = relDest
+	}
+
+	cmd := reexec.Command("storage-untar", dest, root)
 	cmd.Stdin = decompressedArchive
 	cmd.ExtraFiles = append(cmd.ExtraFiles, r)

@@ -68,6 +98,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	if err := cmd.Start(); err != nil {
 		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
 	}
+
 	//write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
 		return fmt.Errorf("Untar json encode to pipe failed: %v", err)

@@ -84,3 +115,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	}
 	return nil
 }
+
+func tar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	src := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = src
+	}
+
+	if err := realChroot(root); err != nil {
+		fatal(err)
+	}
+
+	var options archive.TarOptions
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	rdr, err := archive.TarWithOptions(src, &options)
+	if err != nil {
+		fatal(err)
+	}
+	defer rdr.Close()
+
+	if _, err := io.Copy(os.Stdout, rdr); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if root == "" {
+		return nil, errors.New("root path must not be empty")
+	}
+
+	relSrc, err := filepath.Rel(root, srcPath)
+	if err != nil {
+		return nil, err
+	}
+	if relSrc == "." {
+		relSrc = "/"
+	}
+	if relSrc[0] != '/' {
+		relSrc = "/" + relSrc
+	}
+
+	// make sure we didn't trim a trailing slash with the call to `Rel`
+	if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+		relSrc += "/"
+	}
+
+	cmd := reexec.Command("storage-tar", relSrc, root)
+
+	errBuff := bytes.NewBuffer(nil)
+	cmd.Stderr = errBuff
+
+	tarR, tarW := io.Pipe()
+	cmd.Stdout = tarW
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting options pipe for tar process")
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, errors.Wrap(err, "tar error on re-exec cmd")
+	}
+
+	go func() {
+		err := cmd.Wait()
+		err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+		tarW.CloseWithError(err)
+	}()
+
+	if err := json.NewEncoder(stdin).Encode(options); err != nil {
+		stdin.Close()
+		return nil, errors.Wrap(err, "tar json encode to pipe failed")
+	}
+	stdin.Close()
+
+	return tarR, nil
+}
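The pack side mirrors the untar protocol: the parent re-execs itself as storage-tar with the root-relative source and the chroot root as arguments, sends TarOptions as JSON on the child's stdin, and reads the tar stream from its stdout. A hedged usage sketch of chrootarchive.Tar, with illustrative paths:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// The re-exec'd child takes this branch and runs the storage-tar handler.
	if reexec.Init() {
		return
	}

	root := "/var/lib/mystore/ctr-root"                   // illustrative
	rdr, err := chrootarchive.Tar(root+"/etc", nil, root) // nil options are defaulted
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	out, err := os.Create("/tmp/etc.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The stream is produced by the chrooted child; its errors surface
	// through the pipe's CloseWithError when Copy returns.
	if _, err := io.Copy(out, rdr); err != nil {
		panic(err)
	}
}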

View File

@@ -14,9 +14,16 @@ func chroot(path string) error {
 func invokeUnpack(decompressedArchive io.ReadCloser,
 	dest string,
-	options *archive.TarOptions) error {
+	options *archive.TarOptions, root string) error {
 	// Windows is different to Linux here because Windows does not support
 	// chroot. Hence there is no point sandboxing a chrooted process to
 	// do the unpack. We call inline instead within the daemon process.
 	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
 }
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the pack. We call inline instead within the daemon process.
+	return archive.TarWithOptions(srcPath, options)
+}

View File

@@ -4,9 +4,13 @@ package chrootarchive
 
 import "golang.org/x/sys/unix"
 
-func chroot(path string) error {
+func realChroot(path string) error {
 	if err := unix.Chroot(path); err != nil {
 		return err
 	}
 	return unix.Chdir("/")
 }
+
+func chroot(path string) error {
+	return realChroot(path)
+}

View File

@@ -14,6 +14,7 @@ import (
 func init() {
 	reexec.Register("storage-applyLayer", applyLayer)
 	reexec.Register("storage-untar", untar)
+	reexec.Register("storage-tar", tar)
 }
 
 func fatal(err error) {
View File

@@ -58,3 +58,15 @@ func Stat(path string) (*StatT, error) {
 	}
 	return fromStatT(s)
 }
+
+// Fstat takes an open file descriptor and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file descriptor is invalid.
+func Fstat(fd int) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Fstat(fd, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
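This helper is what lets the per-OS TouchedSince files above be deleted: fromStatT already papers over the Stat_t layout differences (Mtim on Linux vs Mtimespec on Darwin/FreeBSD) behind StatT.Mtim(). A hedged usage sketch mirroring the new TouchedSince, Unix-only:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/containers/storage/pkg/system"
)

// touchedSince mirrors the lockfile method above for an arbitrary open file.
func touchedSince(f *os.File, when time.Time) bool {
	st, err := system.Fstat(int(f.Fd()))
	if err != nil {
		// Report "touched" on a stat failure, as the lockfile code does.
		return true
	}
	mtim := st.Mtim()
	return when.Before(time.Unix(mtim.Unix()))
}

func main() {
	f, err := os.Open("/etc/hostname") // any existing file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println("modified in the last hour:", touchedSince(f, time.Now().Add(-time.Hour)))
}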