github.com/containers/storage v1.12.13

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
Daniel J Walsh
2019-08-01 03:46:14 -04:00
parent 39de184b8b
commit 9d6dce1199
191 changed files with 15069 additions and 59037 deletions

View File

@@ -36,14 +36,15 @@ type (
// TarOptions wraps the tar options.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ChownOpts *idtools.IDPair
IncludeSourceDir bool
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
IgnoreChownErrors bool
ChownOpts *idtools.IDPair
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
@@ -563,7 +564,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@@ -645,8 +646,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if chownOpts == nil {
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil {
return err
err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID)
if err != nil {
if ignoreChownErrors {
fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err)
} else {
return err
}
}
}
@@ -993,7 +999,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS); err != nil {
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors); err != nil {
return err
}
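
A minimal sketch of how a caller might use the new `IgnoreChownErrors` field when unpacking a layer as an unprivileged user; the tarball path and destination directory are placeholders, not values from this commit.

```go
// Sketch: unpack a layer tarball while tolerating chown failures, e.g. when
// running rootless. Paths and error handling are illustrative only.
package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	opts := &archive.TarOptions{
		NoLchown:          false,
		IgnoreChownErrors: true, // new field: log lchown errors instead of failing
	}
	if err := archive.Untar(f, "/tmp/rootfs", opts); err != nil { // placeholder destination
		panic(err)
	}
}
```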

View File

@@ -1,17 +0,0 @@
// +build cgo
package archive
import (
"io"
"github.com/DataDog/zstd"
)
func zstdReader(buf io.Reader) (io.ReadCloser, error) {
return zstd.NewReader(buf), nil
}
func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(dest), nil
}

View File

@@ -445,6 +445,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
} else {
buf.WriteString(`null`)
}
if j.IgnoreChownErrors {
buf.WriteString(`,"IgnoreChownErrors":true`)
} else {
buf.WriteString(`,"IgnoreChownErrors":false`)
}
if j.ChownOpts != nil {
/* Struct fall back. type=idtools.IDPair kind=struct */
buf.WriteString(`,"ChownOpts":`)
@@ -516,6 +521,8 @@ const (
ffjtTarOptionsGIDMaps
ffjtTarOptionsIgnoreChownErrors
ffjtTarOptionsChownOpts
ffjtTarOptionsIncludeSourceDir
@@ -545,6 +552,8 @@ var ffjKeyTarOptionsUIDMaps = []byte("UIDMaps")
var ffjKeyTarOptionsGIDMaps = []byte("GIDMaps")
var ffjKeyTarOptionsIgnoreChownErrors = []byte("IgnoreChownErrors")
var ffjKeyTarOptionsChownOpts = []byte("ChownOpts")
var ffjKeyTarOptionsIncludeSourceDir = []byte("IncludeSourceDir")
@@ -663,6 +672,11 @@ mainparse:
state = fflib.FFParse_want_colon
goto mainparse
} else if bytes.Equal(ffjKeyTarOptionsIgnoreChownErrors, kn) {
currentKey = ffjtTarOptionsIgnoreChownErrors
state = fflib.FFParse_want_colon
goto mainparse
} else if bytes.Equal(ffjKeyTarOptionsIncludeSourceDir, kn) {
currentKey = ffjtTarOptionsIncludeSourceDir
state = fflib.FFParse_want_colon
@@ -766,6 +780,12 @@ mainparse:
goto mainparse
}
if fflib.EqualFoldRight(ffjKeyTarOptionsIgnoreChownErrors, kn) {
currentKey = ffjtTarOptionsIgnoreChownErrors
state = fflib.FFParse_want_colon
goto mainparse
}
if fflib.EqualFoldRight(ffjKeyTarOptionsGIDMaps, kn) {
currentKey = ffjtTarOptionsGIDMaps
state = fflib.FFParse_want_colon
@@ -837,6 +857,9 @@ mainparse:
case ffjtTarOptionsGIDMaps:
goto handle_GIDMaps
case ffjtTarOptionsIgnoreChownErrors:
goto handle_IgnoreChownErrors
case ffjtTarOptionsChownOpts:
goto handle_ChownOpts
@@ -1224,6 +1247,41 @@ handle_GIDMaps:
state = fflib.FFParse_after_value
goto mainparse
handle_IgnoreChownErrors:
/* handler: j.IgnoreChownErrors type=bool kind=bool quoted=false*/
{
if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
}
}
{
if tok == fflib.FFTok_null {
} else {
tmpb := fs.Output.Bytes()
if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
j.IgnoreChownErrors = true
} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
j.IgnoreChownErrors = false
} else {
err = errors.New("unexpected bytes for true/false value")
return fs.WrapErr(err)
}
}
}
state = fflib.FFParse_after_value
goto mainparse
handle_ChownOpts:
/* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/
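
The generated ffjson (un)marshalers above now cover the new field. As a quick illustration of the wire shape, a plain `encoding/json` round trip over the same exported fields (which will pick up the generated `MarshalJSON`/`UnmarshalJSON` methods) might look like this; the output comment is indicative only.

```go
// Sketch: serialize TarOptions and confirm IgnoreChownErrors survives a round trip.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	in := archive.TarOptions{IgnoreChownErrors: true}

	data, err := json.Marshal(&in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // ...,"IgnoreChownErrors":true,...

	var out archive.TarOptions
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.IgnoreChownErrors) // true
}
```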

View File

@@ -1,16 +0,0 @@
// +build !cgo
package archive
import (
"fmt"
"io"
)
func zstdReader(buf io.Reader) (io.ReadCloser, error) {
return nil, fmt.Errorf("zstd not supported on this platform")
}
func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
return nil, fmt.Errorf("zstd not supported on this platform")
}

View File

@@ -0,0 +1,41 @@
package archive
import (
"io"
"github.com/klauspost/compress/zstd"
)
type wrapperZstdDecoder struct {
decoder *zstd.Decoder
}
func (w *wrapperZstdDecoder) Close() error {
w.decoder.Close()
return nil
}
func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
return w.decoder.DecodeAll(input, dst)
}
func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
return w.decoder.Read(p)
}
func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
return w.decoder.Reset(r)
}
func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
return w.decoder.WriteTo(wr)
}
func zstdReader(buf io.Reader) (io.ReadCloser, error) {
decoder, err := zstd.NewReader(buf)
return &wrapperZstdDecoder{decoder: decoder}, err
}
func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(dest)
}
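
The cgo-backed DataDog/zstd files are replaced by this pure-Go wrapper around github.com/klauspost/compress/zstd. The helpers themselves are unexported, so the sketch below shows the same compress/decompress round trip directly against the klauspost library; the sample data is arbitrary.

```go
// Sketch: the pure-Go zstd round trip that the unexported zstdReader/zstdWriter
// helpers wrap.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	dec, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer dec.Close() // Decoder.Close returns nothing; the archive wrapper adapts it to io.Closer

	out, err := ioutil.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello zstd
}
```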

View File

@@ -105,7 +105,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
return 0, err
}
}
@@ -196,7 +196,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
return 0, err
}

View File

@@ -73,6 +73,9 @@ type OptionsConfig struct {
RemapUIDs string `toml:"remap-uids"`
// RemapGIDs is a list of default GID mappings to use for layers.
RemapGIDs string `toml:"remap-gids"`
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
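
Note that the storage.conf value is declared as a string, so a consumer has to parse it before setting the boolean archive option. The wiring below is a hypothetical sketch; the helper name and the way the value reaches `TarOptions` are illustrative, not taken from this commit.

```go
// Hypothetical sketch: turn the OptionsConfig.IgnoreChownErrors string into the
// boolean archive.TarOptions field.
package main

import (
	"fmt"
	"strconv"

	"github.com/containers/storage/pkg/archive"
)

func tarOptionsFromConfig(ignoreChownErrors string) (*archive.TarOptions, error) {
	opts := &archive.TarOptions{}
	if ignoreChownErrors != "" {
		v, err := strconv.ParseBool(ignoreChownErrors) // accepts "true"/"false", "1"/"0", ...
		if err != nil {
			return nil, fmt.Errorf("parsing ignore_chown_errors %q: %v", ignoreChownErrors, err)
		}
		opts.IgnoreChownErrors = v
	}
	return opts, nil
}

func main() {
	opts, err := tarOptionsFromConfig("true") // e.g. from OptionsConfig.IgnoreChownErrors
	if err != nil {
		panic(err)
	}
	fmt.Println(opts.IgnoreChownErrors) // true
}
```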

View File

@@ -0,0 +1,101 @@
package lockfile
import (
"path/filepath"
"sync"
"time"
"github.com/pkg/errors"
)
// A Locker represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
type Locker interface {
// Acquire a writer lock.
Lock()
// Acquire a writer lock recursively, allowing for recursive acquisitions
// within the same process space.
RecursiveLock()
// Unlock the lock.
Unlock()
// Acquire a reader lock.
RLock()
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
Touch() error
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
Modified() (bool, error)
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
TouchedSince(when time.Time) bool
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
// Locked() checks if the lock is locked for writing by a thread in this process
Locked() bool
}
var (
lockfiles map[string]Locker
lockfilesLock sync.Mutex
)
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
func GetLockfile(path string) (Locker, error) {
return getLockfile(path, false)
}
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
func GetROLockfile(path string) (Locker, error) {
return getLockfile(path, true)
}
// getLockfile returns a Locker object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func getLockfile(path string, ro bool) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]Locker)
}
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
}
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
}
locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker
if err != nil {
return nil, err
}
lockfiles[cleanPath] = locker
return locker, nil
}
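
A short sketch of typical writer-side use of the new package: take the exclusive lock, mutate the protected state, record this process as the last writer with Touch(), then unlock. The lock path is a placeholder.

```go
// Sketch: writer-side usage of pkg/lockfile.
package main

import (
	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	lock, err := lockfile.GetLockfile("/var/lib/containers/storage/example.lock") // placeholder path
	if err != nil {
		panic(err)
	}

	lock.Lock()
	defer lock.Unlock()

	// ... modify whatever state the lock protects ...

	// Tell other processes sharing this lock file that we changed things.
	if err := lock.Touch(); err != nil {
		panic(err)
	}
}
```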

View File

@@ -0,0 +1,255 @@
// +build linux solaris darwin freebsd
package lockfile
import (
"fmt"
"os"
"sync"
"time"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
type lockfile struct {
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
file string
fd uintptr
lw string
locktype int16
locked bool
ro bool
recursive bool
}
// openLock opens the file at path and returns the corresponding file
// descriptor. Note that the path is opened read-only when ro is set. If ro
// is unset, openLock will open the path read-write and create the file if
// necessary.
func openLock(path string, ro bool) (int, error) {
if ro {
return unix.Open(path, os.O_RDONLY, 0)
}
return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", path)
}
unix.Close(fd)
locktype := unix.F_WRLCK
if ro {
locktype = unix.F_RDLCK
}
return &lockfile{
stateMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
lw: stringid.GenerateRandomID(),
locktype: int16(locktype),
locked: false,
ro: ro}, nil
}
// lock locks the lockfile via fcntl(2) based on the specified lock type.
func (l *lockfile) lock(l_type int16, recursive bool) {
lk := unix.Flock_t{
Type: l_type,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
}
switch l_type {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
if recursive {
// NOTE: that's okay as recursive is only set in RecursiveLock(), so
// there's no need to protect against hypothetical RDLCK cases.
l.rwMutex.RLock()
} else {
l.rwMutex.Lock()
}
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(fmt.Sprintf("error opening %q", l.file))
}
unix.CloseOnExec(fd)
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = l_type
l.locked = true
l.recursive = recursive
l.counter++
}
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
func (l *lockfile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(unix.F_WRLCK, false)
}
}
// RecursiveLock locks the lockfile as a writer but allows for recursive
// acquisitions within the same process space. Note that RLock() is called
// instead if the lock file is read-only.
func (l *lockfile) RecursiveLock() {
if l.ro {
l.RLock()
} else {
l.lock(unix.F_WRLCK, true)
}
}
// RLock locks the lockfile as a reader.
func (l *lockfile) RLock() {
l.lock(unix.F_RDLCK, false)
}
// Unlock unlocks the lockfile.
func (l *lockfile) Unlock() {
l.stateMutex.Lock()
if l.locked == false {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unix.Close(int(l.fd))
}
if l.locktype == unix.F_RDLCK || l.recursive {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
// Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool {
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
return l.locked && (l.locktype == unix.F_WRLCK)
}
// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
l.stateMutex.Unlock()
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return unix.ENOSPC
}
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
l.stateMutex.Unlock()
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, nil
}
lw := l.lw
l.lw = string(id)
return l.lw != lw, nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
if err != nil {
return true
}
mtim := st.Mtim()
touched := time.Unix(mtim.Unix())
return when.Before(touched)
}
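
The reader-side counterpart is to hold the shared lock and use Modified() to decide whether cached state needs reloading; only the first lock holder in the process pays for the fcntl(2) call, thanks to the counter above. A sketch, with the lock path and reload() as placeholders:

```go
// Sketch: reader-side reload-on-Modified pattern.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func reload() { fmt.Println("reloading state") } // placeholder

func main() {
	lock, err := lockfile.GetLockfile("/var/lib/containers/storage/example.lock") // placeholder path
	if err != nil {
		panic(err)
	}

	lock.RLock()
	defer lock.Unlock()

	changed, err := lock.Modified() // must be called with the lock held
	if err != nil {
		panic(err)
	}
	if changed {
		reload()
	}
}
```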

View File

@@ -0,0 +1,75 @@
// +build windows
package lockfile
import (
"os"
"sync"
"time"
)
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
return &lockfile{locked: false}, nil
}
type lockfile struct {
mu sync.Mutex
file string
locked bool
}
func (l *lockfile) Lock() {
l.mu.Lock()
l.locked = true
}
func (l *lockfile) RecursiveLock() {
	// Recursive write locks are not supported on Windows; within one process
	// a recursive writer lock is really just a writer lock, so panic.
panic("not supported")
}
func (l *lockfile) RLock() {
l.mu.Lock()
l.locked = true
}
func (l *lockfile) Unlock() {
l.locked = false
l.mu.Unlock()
}
func (l *lockfile) Locked() bool {
return l.locked
}
func (l *lockfile) Modified() (bool, error) {
return false, nil
}
func (l *lockfile) Touch() error {
return nil
}
func (l *lockfile) IsReadWrite() bool {
return false
}
func (l *lockfile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
return true
}
return when.Before(stat.ModTime())
}

View File

@@ -0,0 +1,47 @@
package tarlog
import (
"archive/tar"
"io"
"os"
"sync"
"github.com/pkg/errors"
)
type tarLogger struct {
writer *os.File
wg sync.WaitGroup
}
// NewLogger returns a writer that, when a tar archive is written to it, calls
// `logger` for each file header it encounters in the archive.
func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
reader, writer, err := os.Pipe()
if err != nil {
return nil, errors.Wrapf(err, "error creating pipe for tar logger")
}
t := &tarLogger{writer: writer}
tr := tar.NewReader(reader)
t.wg.Add(1)
go func() {
hdr, err := tr.Next()
for err == nil {
logger(hdr)
hdr, err = tr.Next()
}
reader.Close()
t.wg.Done()
}()
return t, nil
}
func (t *tarLogger) Write(b []byte) (int, error) {
return t.writer.Write(b)
}
func (t *tarLogger) Close() error {
err := t.writer.Close()
t.wg.Wait()
return err
}
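
A sketch of how the logger might be used: write a tar stream through it so each header is reported as it goes by. Here a tiny in-memory archive stands in for a real layer tarball.

```go
// Sketch: feed a tar stream through pkg/tarlog and print each file name seen.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/containers/storage/pkg/tarlog"
)

func main() {
	// Build a one-file tar archive in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	logger, err := tarlog.NewLogger(func(hdr *tar.Header) {
		fmt.Println("saw:", hdr.Name)
	})
	if err != nil {
		panic(err)
	}

	if _, err := io.Copy(logger, &buf); err != nil {
		panic(err)
	}
	logger.Close() // closes the pipe and waits for the reader goroutine to drain it
}
```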