vendor: update c/storage

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
Giuseppe Scrivano
2024-07-01 11:35:39 +02:00
parent 5cc57427f3
commit 5e156c424f
54 changed files with 1272 additions and 1170 deletions

View File

@@ -70,6 +70,8 @@ type (
}
)
const PaxSchilyXattr = "SCHILY.xattr."
const (
tarExt = "tar"
solaris = "solaris"
@@ -169,10 +171,17 @@ func DetectCompression(source []byte) Compression {
}
// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
defer func() {
if Err != nil {
p.Put(buf)
}
}()
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
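
This hunk gives DecompressStream a named error return (Err) so that a single deferred closure returns the pooled bufio.Reader on every failure path, instead of repeating p.Put(buf) before each early return. A minimal standalone sketch of the same pattern; bufPool and peekReader are illustrative stand-ins, not the c/storage API:

package main

import (
    "bufio"
    "bytes"
    "errors"
    "fmt"
    "io"
    "sync"
)

// bufPool is an illustrative stand-in for pools.BufioReader32KPool.
var bufPool = sync.Pool{New: func() any { return bufio.NewReaderSize(nil, 32*1024) }}

// peekReader mirrors the shape of the patched DecompressStream: on any error
// return, the deferred closure puts the pooled reader back; on success the
// returned reader is owned by the caller.
func peekReader(r io.Reader) (_ *bufio.Reader, Err error) {
    buf := bufPool.Get().(*bufio.Reader)
    buf.Reset(r)
    defer func() {
        if Err != nil {
            bufPool.Put(buf)
        }
    }()
    if _, err := buf.Peek(10); err != nil && !errors.Is(err, io.EOF) {
        return nil, err // buf is released by the deferred closure
    }
    return buf, nil
}

func main() {
    br, err := peekReader(bytes.NewReader([]byte("some data")))
    fmt.Println(br != nil, err)
}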
@@ -189,6 +198,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, buf)
return readBufWrapper, nil
case Gzip:
cleanup := func() {
p.Put(buf)
}
if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse {
return rc, nil
}
gzReader, err := gzip.NewReader(buf)
if err != nil {
return nil, err
@@ -207,6 +222,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return readBufWrapper, nil
case Zstd:
cleanup := func() {
p.Put(buf)
}
if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse {
return rc, nil
}
return zstdReader(buf)
default:
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
@@ -214,9 +235,16 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
// CompressStream compresses dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
defer func() {
if Err != nil {
p.Put(buf)
}
}()
switch compression {
case Uncompressed:
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
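
CompressStream receives the same treatment: a named error return plus one deferred p.Put(buf) covering all failure paths. For context, a round trip through the two exported helpers looks roughly like this (a sketch assuming the vendored import path github.com/containers/storage/pkg/archive and its Gzip constant; error handling is abbreviated):

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/containers/storage/pkg/archive"
)

func main() {
    // Compress some bytes...
    var compressed bytes.Buffer
    wc, err := archive.CompressStream(&compressed, archive.Gzip)
    if err != nil {
        panic(err)
    }
    if _, err := wc.Write([]byte("layer contents")); err != nil {
        panic(err)
    }
    wc.Close()

    // ...and decompress them again. DecompressStream sniffs the format itself.
    rc, err := archive.DecompressStream(&compressed)
    if err != nil {
        panic(err)
    }
    defer rc.Close()
    out, _ := io.ReadAll(rc)
    fmt.Println(string(out)) // "layer contents"
}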
@@ -391,11 +419,11 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
return hdr, nil
}
// ReadSecurityXattrToTarHeader reads security.capability, security,image
// readSecurityXattrToTarHeader reads security.capability, security.ima
// xattrs from filesystem to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.PAXRecords == nil {
hdr.PAXRecords = make(map[string]string)
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
@@ -403,14 +431,14 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
}
}
return nil
}
// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return err
@@ -425,10 +453,10 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
}
return err
}
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
if hdr.PAXRecords == nil {
hdr.PAXRecords = make(map[string]string)
}
hdr.Xattrs[key] = string(value)
hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
}
}
return nil
@@ -516,10 +544,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if err != nil {
return err
}
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
if err := readUserXattrToTarHeader(path, hdr); err != nil {
return err
}
if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
@@ -642,7 +670,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
case tar.TypeReg, tar.TypeRegA:
case tar.TypeReg:
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
@@ -701,8 +729,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777)
if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
value := idtools.Stat{
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
Mode: hdrInfo.Mode() & 0o7777,
}
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
return err
}
}
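
Rather than hand-formatting the override value as a "uid:gid:0mode" string and writing idtools.ContainersOverrideXattr with a raw system.Lsetxattr, the code now fills an idtools.Stat and lets idtools.SetContainersOverrideXattr do the serialization. A sketch of the call shape, assuming the vendored idtools API exactly as it appears in this hunk (the path is illustrative):

package main

import (
    "fmt"
    "os"

    "github.com/containers/storage/pkg/idtools"
)

func main() {
    // Equivalent to the old fmt.Sprintf("%d:%d:0%o", 1000, 1000, 0o755)
    // written via system.Lsetxattr(path, idtools.ContainersOverrideXattr, ...).
    st := idtools.Stat{
        IDs:  idtools.IDPair{UID: 1000, GID: 1000},
        Mode: os.FileMode(0o755),
    }
    // "/var/lib/mystorage/dir" is an illustrative path.
    if err := idtools.SetContainersOverrideXattr("/var/lib/mystorage/dir", st); err != nil {
        fmt.Println("setting override xattr failed:", err) // e.g. missing path or no xattr support
    }
}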
@@ -753,11 +784,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
var errs []string
for key, value := range hdr.Xattrs {
if _, found := xattrsToIgnore[key]; found {
for key, value := range hdr.PAXRecords {
xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr)
if !ok {
continue
}
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if _, found := xattrsToIgnore[xattrKey]; found {
continue
}
if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
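
The restore path is the mirror image of the header-building changes above: it iterates hdr.PAXRecords, keeps only keys carrying the SCHILY.xattr. prefix, strips the prefix with strings.CutPrefix, and skips the usual ignore list before calling system.Lsetxattr. A self-contained sketch of just that filtering step (the ignore list and record values are made up):

package main

import (
    "fmt"
    "strings"
)

const paxSchilyXattr = "SCHILY.xattr."

// Illustrative ignore list; the real one lives in the archive package.
var xattrsToIgnore = map[string]any{"security.selinux": nil}

func main() {
    paxRecords := map[string]string{
        paxSchilyXattr + "user.comment":     "hello",
        paxSchilyXattr + "security.selinux": "unconfined_u:object_r:user_home_t:s0",
        "mtime":                             "1719826539.0", // non-xattr PAX record, skipped
    }
    for key, value := range paxRecords {
        xattrKey, ok := strings.CutPrefix(key, paxSchilyXattr)
        if !ok {
            continue // not an xattr record
        }
        if _, found := xattrsToIgnore[xattrKey]; found {
            continue
        }
        // Here the real code calls system.Lsetxattr(path, xattrKey, []byte(value), 0).
        fmt.Printf("would set xattr %q = %q\n", xattrKey, value)
    }
}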
@@ -1113,9 +1148,14 @@ loop:
}
}
if options.ForceMask != nil && rootHdr != nil {
value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode)
if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
if options.ForceMask != nil {
value := idtools.Stat{Mode: 0o755}
if rootHdr != nil {
value.IDs.UID = rootHdr.Uid
value.IDs.GID = rootHdr.Gid
value.Mode = os.FileMode(rootHdr.Mode)
}
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
return err
}
}
@@ -1337,7 +1377,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
}
} else if runtime.GOOS == darwin {
uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32)
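
On darwin the override value still travels as a "uid:gid:0mode" string, now looked up in the SCHILY.xattr. PAX record instead of hdr.Xattrs. A standalone sketch of the kind of parsing remapIDs performs on it; parseOverride is a hypothetical helper and its error handling is simplified:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseOverride splits a uid:gid:mode override string; the mode digits are
// octal because the value is produced with fmt.Sprintf("%d:%d:0%o", ...).
func parseOverride(xstat string) (uid, gid, mode uint64, err error) {
    attrs := strings.Split(xstat, ":")
    if len(attrs) != 3 {
        return 0, 0, 0, fmt.Errorf("expected uid:gid:mode, got %q", xstat)
    }
    if uid, err = strconv.ParseUint(attrs[0], 10, 32); err != nil {
        return 0, 0, 0, err
    }
    if gid, err = strconv.ParseUint(attrs[1], 10, 32); err != nil {
        return 0, 0, 0, err
    }
    if mode, err = strconv.ParseUint(attrs[2], 8, 32); err != nil {
        return 0, 0, 0, err
    }
    return uid, gid, mode, nil
}

func main() {
    fmt.Println(parseOverride("1000:1000:0755"))
}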

View File

@@ -1,5 +1,5 @@
//go:build freebsd || darwin
// +build freebsd darwin
//go:build netbsd || freebsd || darwin
// +build netbsd freebsd darwin
package archive

View File

@@ -48,8 +48,8 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, getOverlayOpaqueXattrName())
if hdr.PAXRecords != nil {
delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName())
}
// If there are no lower layers, then it can't have been deleted in this layer.
if len(o.rolayers) == 0 {

View File

@@ -31,9 +31,9 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
ownerChanged ||
oldStat.Rdev() != newStat.Rdev() ||
oldStat.Flags() != newStat.Flags() ||
!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) {
return true
}
return false
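
This statDifferent change hoists the mtime comparison out of the non-directory branch: a changed modification time now marks directories as different too, while size stays ignored for directories. Restated as a standalone predicate over plain values (a sketch, not the c/storage code; the other checks in statDifferent are unchanged and omitted):

package main

import (
    "fmt"
    "time"
)

// changed restates the new condition for the fields touched by this hunk.
func changed(oldMtime, newMtime time.Time, oldSize, newSize int64, isDir bool) bool {
    if !oldMtime.Equal(newMtime) {
        return true // mtime now matters for directories too
    }
    // Don't look at size for dirs, it's not a good measure of change.
    return !isDir && oldSize != newSize
}

func main() {
    t := time.Now()
    fmt.Println(changed(t, t.Add(time.Second), 0, 0, true)) // true: dir mtime changed
    fmt.Println(changed(t, t, 10, 20, true))                // false: dir size ignored
    fmt.Println(changed(t, t, 10, 20, false))               // true: file size changed
}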

View File

@@ -0,0 +1,55 @@
package archive
import (
"bytes"
"fmt"
"io"
"os/exec"
"strings"
"sync"
)
var filterPath sync.Map
func getFilterPath(name string) string {
path, ok := filterPath.Load(name)
if ok {
return path.(string)
}
path, err := exec.LookPath(name)
if err != nil {
path = ""
}
filterPath.Store(name, path)
return path.(string)
}
// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout.
// cleanup() is a caller provided function that will be called when the command finishes running, regardless of
// whether it succeeds or fails.
// If the command is not found, it returns (nil, false) and the cleanup function is not called.
func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) {
path := getFilterPath(args[0])
if path == "" {
return nil, false
}
var stderrBuf bytes.Buffer
r, w := io.Pipe()
cmd := exec.Command(path, args[1:]...)
cmd.Stdin = input
cmd.Stdout = w
cmd.Stderr = &stderrBuf
go func() {
err := cmd.Run()
if err != nil && stderrBuf.Len() > 0 {
err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err)
}
w.CloseWithError(err) // CloseWithError(nil) == Close()
cleanup()
}()
return r, true
}
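
tryProcFilter is what lets DecompressStream hand gzip and zstd payloads to pigz or zstd when those binaries are installed (see the first file above). The following standalone program shows the same pipe/cleanup wiring in miniature, using a gzip binary on PATH as the external filter; runFilter is an illustrative reduction that omits the LookPath caching and stderr capture of the real helper:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "os/exec"
)

// runFilter is a scaled-down version of the pipe wiring in tryProcFilter:
// the child's stdout becomes an io.ReadCloser, and CloseWithError makes a
// failing child surface as a read error on that pipe.
func runFilter(args []string, input io.Reader, cleanup func()) io.ReadCloser {
    r, w := io.Pipe()
    cmd := exec.Command(args[0], args[1:]...)
    cmd.Stdin = input
    cmd.Stdout = w
    go func() {
        w.CloseWithError(cmd.Run()) // CloseWithError(nil) == Close()
        cleanup()
    }()
    return r
}

func main() {
    // Prepare a gzip blob in-process...
    var blob bytes.Buffer
    zw := gzip.NewWriter(&blob)
    zw.Write([]byte("hello from an external filter"))
    zw.Close()

    // ...and decompress it through an external "gzip -d", the same way
    // DecompressStream uses pigz or zstd when they are available.
    out := runFilter([]string{"gzip", "-d"}, &blob, func() { fmt.Println("cleanup ran") })
    data, err := io.ReadAll(out)
    fmt.Println(string(data), err)
}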