vendor: update c/{buildah,common,image,storage} to main
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
vendor/github.com/containers/storage/pkg/archive/archive.go (generated, vendored) | 414
@@ -528,11 +528,29 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}

// addFile adds a file from `path` as `name` to the tar archive.
func (ta *tarWriter) addFile(path, name string) error {
type addFileData struct {
// The path from which to read contents.
path string

// os.Stat for the above.
fi os.FileInfo

// The file header of the above.
hdr *tar.Header

// if present, an extra whiteout entry to write after the header.
extraWhiteout *tar.Header
}

// prepareAddFile generates the tar file header(s) for adding a file
// from path as name to the tar archive, without writing to the
// tar stream. Thus, any error may be ignored without corrupting the
// tar file. A (nil, nil) return means that the file should be
// ignored for non-error reasons.
func (ta *tarWriter) prepareAddFile(path, name string) (*addFileData, error) {
fi, err := os.Lstat(path)
if err != nil {
return err
return nil, err
}

var link string
@@ -540,26 +558,26 @@ func (ta *tarWriter) addFile(path, name string) error {
var err error
link, err = os.Readlink(path)
if err != nil {
return err
return nil, err
}
}
if fi.Mode()&os.ModeSocket != 0 {
logrus.Infof("archive: skipping %q since it is a socket", path)
return nil
return nil, nil
}

hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
return err
return nil, err
}
if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
return err
return nil, err
}
if err := readUserXattrToTarHeader(path, hdr); err != nil {
return err
return nil, err
}
if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
return err
return nil, err
}
if ta.CopyPass {
copyPassHeader(hdr)
@@ -568,18 +586,13 @@ func (ta *tarWriter) addFile(path, name string) error {
// if it's not a directory and has more than 1 link,
// it's hard linked, so set the type flag accordingly
if !fi.IsDir() && hasHardlinks(fi) {
inode, err := getInodeFromStat(fi.Sys())
if err != nil {
return err
}
inode := getInodeFromStat(fi.Sys())
// a link should have a name that it links too
// and that linked name should be first in the tar archive
if oldpath, ok := ta.SeenFiles[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = oldpath
hdr.Size = 0 // This Must be here for the writer math to add up!
} else {
ta.SeenFiles[inode] = name
hdr.Size = 0 // This must be here for the writer math to add up!
}
}

@@ -589,11 +602,11 @@ func (ta *tarWriter) addFile(path, name string) error {
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
fileIDPair, err := getFileUIDGID(fi.Sys())
if err != nil {
return err
return nil, err
}
hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
if err != nil {
return err
return nil, err
}
}

@@ -616,26 +629,48 @@ func (ta *tarWriter) addFile(path, name string) error {

maybeTruncateHeaderModTime(hdr)

result := &addFileData{
path: path,
hdr:  hdr,
fi:   fi,
}
if ta.WhiteoutConverter != nil {
wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
// The WhiteoutConverter suggests a generic mechanism,
// but this code is only used to convert between
// overlayfs (on-disk) and AUFS (in the tar file)
// whiteouts, and is initiated because the overlayfs
// storage driver returns OverlayWhiteoutFormat from
// Driver.getWhiteoutFormat().
//
// For AUFS, a directory with all its contents deleted
// should be represented as a directory containing a
// magic whiteout empty regular file, hence the
// extraWhiteout header returned here.
result.extraWhiteout, err = ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
if err != nil {
return nil, err
}
}

return result, nil
}

// addFile performs the write. An error here corrupts the tar file.
func (ta *tarWriter) addFile(headers *addFileData) error {
hdr := headers.hdr
if headers.extraWhiteout != nil {
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
// If we write hdr with hdr.Size > 0, we have
// to write the body before we can write the
// extraWhiteout header. This can only happen
// if the contract for WhiteoutConverter is
// not honored, so bail out.
return fmt.Errorf("tar: cannot use extra whiteout with non-empty file %s", hdr.Name)
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}

// If a new whiteout file exists, write original hdr, then
// replace hdr with wo to be written after. Whiteouts should
// always be written after the original. Note the original
// hdr may have been updated to be a whiteout with returning
// a whiteout header
if wo != nil {
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
return fmt.Errorf("tar: cannot use whiteout for non-empty file")
}
hdr = wo
}
hdr = headers.extraWhiteout
}

if err := ta.TarWriter.WriteHeader(hdr); err != nil {
@@ -643,7 +678,7 @@ func (ta *tarWriter) addFile(path, name string) error {
}

if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := os.Open(path)
file, err := os.Open(headers.path)
if err != nil {
return err
}
@@ -661,6 +696,10 @@ func (ta *tarWriter) addFile(path, name string) error {
}
}

if !headers.fi.IsDir() && hasHardlinks(headers.fi) {
ta.SeenFiles[getInodeFromStat(headers.fi.Sys())] = headers.hdr.Name
}

return nil
}

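The hunks above split the old single-step addFile into prepareAddFile (header construction, safe to abandon on error) and addFile (the actual write, where an error corrupts the stream). As a rough sketch of how a caller is expected to drive the two steps, mirroring the TarWithOptions change later in this diff and assuming a hypothetical helper named addOneEntry inside the same package:

func addOneEntry(ta *tarWriter, filePath, relFilePath string) error {
	headers, err := ta.prepareAddFile(filePath, relFilePath)
	if err != nil {
		// Nothing has been written to the tar stream yet, so the error is
		// recoverable: log it and skip the entry.
		logrus.Errorf("Can't add file %s to tar: %s; skipping", filePath, err)
		return nil
	}
	if headers == nil {
		// A (nil, nil) result means a non-error skip, e.g. a socket.
		return nil
	}
	// From here on, a failure leaves a partially written entry behind,
	// so it has to abort the walk.
	return ta.addFile(headers)
}
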
@@ -853,184 +892,189 @@ func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Rea
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
// stream of bytes. This is a convenience wrapper for [TarWithOptions].
func Tar(path string, compression Compression) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
defer func() {
if err := dest.Close(); err != nil && result == nil {
result = err
}
}()
func tarWithOptionsTo(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
defer func() {
if err := dest.Close(); err != nil && result == nil {
result = err
}
}()

pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return err
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return err
}

compressWriter, err := CompressStream(dest, options.Compression)
if err != nil {
return err
}

ta := newTarWriter(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
options.ChownOpts,
options.Timestamp,
)
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass

includeFiles := options.IncludeFiles
defer func() {
if err := compressWriter.Close(); err != nil && result == nil {
result = err
}
}()

// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)

// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this

stat, err := os.Lstat(srcPath)
if err != nil {
return err
}

if !stat.IsDir() {
// We can't later join a non-dir with any includes because the
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(includeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}

compressWriter, err := CompressStream(dest, options.Compression)
if err != nil {
return err
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
includeFiles = []string{base}
}

ta := newTarWriter(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
options.ChownOpts,
options.Timestamp,
)
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass
if len(includeFiles) == 0 {
includeFiles = []string{"."}
}

includeFiles := options.IncludeFiles
defer func() {
if err := compressWriter.Close(); err != nil && result == nil {
result = err
}
}()
seen := make(map[string]bool)

// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
for _, include := range includeFiles {
rebaseName := options.RebaseNames[include]

// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this

stat, err := os.Lstat(srcPath)
if err != nil {
return err
}

if !stat.IsDir() {
// We can't later join a non-dir with any includes because the
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(includeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
walkRoot := getWalkRoot(srcPath, include)
if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}

dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
includeFiles = []string{base}
}
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
return nil //nolint: nilerr
}

if len(includeFiles) == 0 {
includeFiles = []string{"."}
}
if options.IncludeSourceDir && include == "." && relFilePath != "." {
relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
}

seen := make(map[string]bool)
skip := false

for _, include := range includeFiles {
rebaseName := options.RebaseNames[include]

walkRoot := getWalkRoot(srcPath, include)
if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
// If "include" is an exact match for the current file
// then even if there's an "excludePatterns" pattern that
// matches it, don't skip it. IOW, assume an explicit 'include'
// is asking for that file no matter what - which is true
// for some files, like .dockerignore and Dockerfile (sometimes)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return fmt.Errorf("matching %s: %w", relFilePath, err)
}
skip = matches
}

if skip {
// If we want to skip this file and its a directory
// then we should first check to see if there's an
// excludes pattern (e.g. !dir/file) that starts with this
// dir. If so then we can't skip this dir.

// Its not a dir then so we can just return/skip.
if !d.IsDir() {
return nil
}

relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
return nil //nolint: nilerr
}

if options.IncludeSourceDir && include == "." && relFilePath != "." {
relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
}

skip := false

// If "include" is an exact match for the current file
// then even if there's an "excludePatterns" pattern that
// matches it, don't skip it. IOW, assume an explicit 'include'
// is asking for that file no matter what - which is true
// for some files, like .dockerignore and Dockerfile (sometimes)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
return fmt.Errorf("matching %s: %w", relFilePath, err)
}
skip = matches
}

if skip {
// If we want to skip this file and its a directory
// then we should first check to see if there's an
// excludes pattern (e.g. !dir/file) that starts with this
// dir. If so then we can't skip this dir.

// Its not a dir then so we can just return/skip.
if !d.IsDir() {
return nil
}

// No exceptions (!...) in patterns so just skip dir
if !pm.Exclusions() {
return filepath.SkipDir
}

dirSlash := relFilePath + string(filepath.Separator)

for _, pat := range pm.Patterns() {
if !pat.Exclusion() {
continue
}
if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
// found a match - so can't skip this dir
return nil
}
}

// No matching exclusion dir so just skip dir
// No exceptions (!...) in patterns so just skip dir
if !pm.Exclusions() {
return filepath.SkipDir
}

if seen[relFilePath] {
return nil
}
seen[relFilePath] = true
dirSlash := relFilePath + string(filepath.Separator)

// Rename the base resource.
if rebaseName != "" {
var replacement string
if rebaseName != string(filepath.Separator) {
// Special case the root directory to replace with an
// empty string instead so that we don't end up with
// double slashes in the paths.
replacement = rebaseName
for _, pat := range pm.Patterns() {
if !pat.Exclusion() {
continue
}

relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}

if err := ta.addFile(filePath, relFilePath); err != nil {
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
return err
if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
// found a match - so can't skip this dir
return nil
}
}
return nil
}); err != nil {
return err

// No matching exclusion dir so just skip dir
return filepath.SkipDir
}
}
return ta.TarWriter.Close()
}

if seen[relFilePath] {
return nil
}
seen[relFilePath] = true

// Rename the base resource.
if rebaseName != "" {
var replacement string
if rebaseName != string(filepath.Separator) {
// Special case the root directory to replace with an
// empty string instead so that we don't end up with
// double slashes in the paths.
replacement = rebaseName
}

relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}

headers, err := ta.prepareAddFile(filePath, relFilePath)
if err != nil {
logrus.Errorf("Can't add file %s to tar: %s; skipping", filePath, err)
} else if headers != nil {
if err := ta.addFile(headers); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
}
return ta.TarWriter.Close()
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
//
// If used on a file system being modified concurrently,
// TarWithOptions will create a valid tar archive, but may leave out
// some files.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
go func() {
err := tarWithOptionsTo(pipeWriter, srcPath, options)
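The rewritten TarWithOptions above keeps handing out a streaming io.ReadCloser backed by io.Pipe, with the synchronous producer (now the top-level tarWithOptionsTo) running in a goroutine. A small self-contained sketch of that standard-library pattern, using a stand-in produce function rather than this package's internals:

package main

import (
	"fmt"
	"io"
	"os"
)

// produce stands in for tarWithOptionsTo: it writes the payload into dest
// and closes it so the reader observes EOF.
func produce(dest io.WriteCloser) error {
	defer dest.Close()
	_, err := io.WriteString(dest, "streamed payload\n")
	return err
}

// stream exposes the producer as an io.ReadCloser, the way TarWithOptions
// wraps tarWithOptionsTo behind io.Pipe.
func stream() io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		// A nil error closes the pipe cleanly; a non-nil error is reported
		// to the reader on its next Read.
		pw.CloseWithError(produce(pw))
	}()
	return pr
}

func main() {
	rc := stream()
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		fmt.Fprintln(os.Stderr, "read:", err)
	}
}
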
@@ -1446,7 +1490,7 @@ func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
st, err := f.Stat()
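For context, a usage sketch of the public entry points whose documentation this change touches. The import path matches the vendor directory above; the Uncompressed constant, the example source path, and the exact TarOptions field set beyond Compression, IncludeFiles, and ExcludePatterns are assumptions about this package rather than something shown in the diff:

package main

import (
	"io"
	"log"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Tar(path, compression) is documented above as a convenience wrapper
	// around TarWithOptions; the explicit form is used here.
	rc, err := archive.TarWithOptions("/var/lib/mydata", &archive.TarOptions{
		Compression:     archive.Uncompressed, // assumed constant name
		ExcludePatterns: []string{"*.tmp"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := os.Create("data.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// The archive is produced concurrently behind an io.Pipe, so the caller
	// simply streams it. Per the new doc comment, files modified while the
	// walk is running may be left out, but the archive itself stays valid.
	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}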