Remove duplication and make progress bar usage consistent

Signed-off-by: Mario Loriedo <mario.loriedo@gmail.com>
Author: Mario Loriedo
Date: 2024-02-20 13:56:12 +01:00
parent c42d3a74ed
commit 2245cf8dc4
6 changed files with 116 additions and 172 deletions
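At a glance, the refactor replaces the three per-format helpers (srcFilePath(), reader(), copy()) with a narrower contract and moves all progress-bar and destination-file handling into a single runDecompression(). Condensed from the diff below (not a complete file):

package compression

import (
	"io"
	"os"
)

// The interface every format-specific decompressor now implements.
// Size and mode come from the shared genericDecompressor, so
// runDecompression no longer needs to stat the source file itself.
type decompressor interface {
	compressedFileSize() int64
	compressedFileMode() os.FileMode
	compressedFileReader() (io.ReadCloser, error)
	decompress(w io.WriteSeeker, r io.Reader) error
	close()
}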

View File

@@ -1,6 +1,7 @@
 package compression
 
 import (
+	"errors"
 	"io"
 	"os"
 	"path/filepath"
@@ -14,19 +15,39 @@ import (
 )
 
 const (
-	zipExt            = ".zip"
-	progressBarPrefix = "Extracting compressed file"
-	macOs             = "darwin"
+	decompressedFileFlag = os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+	macOs                = "darwin"
+	progressBarPrefix    = "Extracting compressed file"
+	zipExt               = ".zip"
 )
 
 type decompressor interface {
-	srcFilePath() string
-	reader() (io.Reader, error)
-	copy(w *os.File, r io.Reader) error
+	compressedFileSize() int64
+	compressedFileMode() os.FileMode
+	compressedFileReader() (io.ReadCloser, error)
+	decompress(w io.WriteSeeker, r io.Reader) error
 	close()
 }
 
-func newDecompressor(compressedFilePath string, compressedFileContent []byte) decompressor {
+func Decompress(compressedVMFile *define.VMFile, decompressedFilePath string) error {
+	compressedFilePath := compressedVMFile.GetPath()
+	// Are we reading full image file?
+	// Only few bytes are read to detect
+	// the compression type
+	compressedFileContent, err := compressedVMFile.Read()
+	if err != nil {
+		return err
+	}
+
+	var d decompressor
+	if d, err = newDecompressor(compressedFilePath, compressedFileContent); err != nil {
+		return err
+	}
+
+	return runDecompression(d, decompressedFilePath)
+}
+
+func newDecompressor(compressedFilePath string, compressedFileContent []byte) (decompressor, error) {
 	compressionType := archive.DetectCompression(compressedFileContent)
 	os := runtime.GOOS
 	hasZipSuffix := strings.HasSuffix(compressedFilePath, zipExt)
@@ -40,6 +61,10 @@ func newDecompressor(compressedFilePath string, compressedFileContent []byte) decompressor {
 		return newZipDecompressor(compressedFilePath)
 	case compressionType == archive.Uncompressed:
 		return newUncompressedDecompressor(compressedFilePath)
+	// macOS gzipped VM images are sparse. As a result a
+	// special decompressor is required: it uses crc os.CopySparse
+	// instead of io.Copy and std lib gzip instead of klauspost/pgzip
+	// (even if it's slower).
 	case compressionType == archive.Gzip && os == macOs:
 		return newGzipDecompressor(compressedFilePath)
 	default:
@@ -47,70 +72,42 @@ func newDecompressor(compressedFilePath string, compressedFileContent []byte) decompressor {
 	}
 }
 
-func Decompress(srcVMFile *define.VMFile, dstFilePath string) error {
-	srcFilePath := srcVMFile.GetPath()
-	// Are we reading full image file?
-	// Only few bytes are read to detect
-	// the compression type
-	srcFileContent, err := srcVMFile.Read()
-	if err != nil {
-		return err
-	}
-
-	d := newDecompressor(srcFilePath, srcFileContent)
-	return runDecompression(d, dstFilePath)
-}
-
-func runDecompression(d decompressor, dstFilePath string) error {
-	decompressorReader, err := d.reader()
+func runDecompression(d decompressor, decompressedFilePath string) error {
+	compressedFileReader, err := d.compressedFileReader()
 	if err != nil {
 		return err
 	}
 	defer d.close()
 
-	stat, err := os.Stat(d.srcFilePath())
-	if err != nil {
-		return err
-	}
-
-	initMsg := progressBarPrefix + ": " + filepath.Base(dstFilePath)
+	initMsg := progressBarPrefix + ": " + filepath.Base(decompressedFilePath)
 	finalMsg := initMsg + ": done"
 
-	// We are getting the compressed file size but
-	// the progress bar needs the full size of the
-	// decompressed file.
-	// As a result the progress bar shows 100%
-	// before the decompression completes.
-	// A workaround is to set the size to -1 but the
-	// side effect is that we won't see any advancment in
-	// the bar.
-	// An update in utils.ProgressBar to handle is needed
-	// to improve the case of size=-1 (i.e. unkwonw size).
-	p, bar := utils.ProgressBar(initMsg, stat.Size(), finalMsg)
+	p, bar := utils.ProgressBar(initMsg, d.compressedFileSize(), finalMsg)
 	// Wait for bars to complete and then shut down the bars container
 	defer p.Wait()
 
-	readProxy := bar.ProxyReader(decompressorReader)
+	compressedFileReaderProxy := bar.ProxyReader(compressedFileReader)
 	// Interrupts the bar goroutine. It's important that
 	// bar.Abort(false) is called before p.Wait(), otherwise
 	// can hang.
 	defer bar.Abort(false)
 
-	dstFileWriter, err := os.OpenFile(dstFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, stat.Mode())
-	if err != nil {
-		logrus.Errorf("Unable to open destination file %s for writing: %q", dstFilePath, err)
+	var decompressedFileWriter *os.File
+
+	if decompressedFileWriter, err = os.OpenFile(decompressedFilePath, decompressedFileFlag, d.compressedFileMode()); err != nil {
+		logrus.Errorf("Unable to open destination file %s for writing: %q", decompressedFilePath, err)
 		return err
 	}
 	defer func() {
-		if err := dstFileWriter.Close(); err != nil {
-			logrus.Errorf("Unable to to close destination file %s: %q", dstFilePath, err)
+		if err := decompressedFileWriter.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+			logrus.Warnf("Unable to to close destination file %s: %q", decompressedFilePath, err)
 		}
 	}()
 
-	err = d.copy(dstFileWriter, readProxy)
-	if err != nil {
+	if err = d.decompress(decompressedFileWriter, compressedFileReaderProxy); err != nil {
 		logrus.Errorf("Error extracting compressed file: %q", err)
 		return err
 	}
 
 	return nil
 }
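The progress-bar handling above follows the proxy-reader pattern: the bar's total is the compressed file size, the compressed stream is wrapped with ProxyReader so the bar advances as bytes are consumed, and bar.Abort(false) is deferred after p.Wait() so it runs first and the container cannot hang. utils.ProgressBar is Podman's own helper; the sketch below only reproduces the same ordering directly with the vbauerster/mpb library and is an illustration, not the helper's actual implementation:

package main

import (
	"io"
	"os"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

// copyWithBar shows the ordering used by runDecompression: create the
// bar, defer p.Wait() first and bar.Abort(false) second (deferred calls
// run in reverse order), then copy through the bar's proxy reader.
func copyWithBar(dst io.Writer, src io.Reader, total int64, msg string) error {
	p := mpb.New(mpb.WithWidth(60))
	bar := p.AddBar(total,
		mpb.PrependDecorators(decor.Name(msg)),
		mpb.AppendDecorators(decor.Percentage()),
	)
	// Wait for the bar to complete and shut down the bars container.
	defer p.Wait()
	// Interrupt the bar goroutine; must happen before p.Wait().
	defer bar.Abort(false)

	proxy := bar.ProxyReader(src)
	defer proxy.Close()

	_, err := io.Copy(dst, proxy)
	return err
}

func main() {
	f, err := os.Open("disk.img.gz") // hypothetical input path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		panic(err)
	}
	if err := copyWithBar(io.Discard, f, st.Size(), "Extracting compressed file: disk.img"); err != nil {
		panic(err)
	}
}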

View File

@@ -2,6 +2,7 @@ package compression
 
 import (
 	"io"
+	"io/fs"
 	"os"
 
 	"github.com/containers/image/v5/pkg/compression"
@@ -9,38 +10,48 @@ import (
 )
 
 type genericDecompressor struct {
 	compressedFilePath string
 	compressedFile     *os.File
-	uncompressStream   io.ReadCloser
+	decompressedFileReader io.ReadCloser
+	compressedFileInfo     os.FileInfo
 }
 
-func newGenericDecompressor(compressedFilePath string) decompressor {
-	return &genericDecompressor{
-		compressedFilePath: compressedFilePath,
-	}
-}
-
-func (d *genericDecompressor) srcFilePath() string {
-	return d.compressedFilePath
-}
-
-func (d *genericDecompressor) reader() (io.Reader, error) {
-	srcFile, err := os.Open(d.compressedFilePath)
+func newGenericDecompressor(compressedFilePath string) (*genericDecompressor, error) {
+	d := &genericDecompressor{}
+	d.compressedFilePath = compressedFilePath
+	stat, err := os.Stat(d.compressedFilePath)
 	if err != nil {
 		return nil, err
 	}
-	d.compressedFile = srcFile
-	return srcFile, nil
+	d.compressedFileInfo = stat
+	return d, nil
 }
 
-func (d *genericDecompressor) copy(w *os.File, r io.Reader) error {
-	uncompressStream, _, err := compression.AutoDecompress(r)
+func (d *genericDecompressor) compressedFileSize() int64 {
+	return d.compressedFileInfo.Size()
+}
+
+func (d *genericDecompressor) compressedFileMode() fs.FileMode {
+	return d.compressedFileInfo.Mode()
+}
+
+func (d *genericDecompressor) compressedFileReader() (io.ReadCloser, error) {
+	compressedFile, err := os.Open(d.compressedFilePath)
+	if err != nil {
+		return nil, err
+	}
+	d.compressedFile = compressedFile
+	return compressedFile, nil
+}
+
+func (d *genericDecompressor) decompress(w io.WriteSeeker, r io.Reader) error {
+	decompressedFileReader, _, err := compression.AutoDecompress(r)
 	if err != nil {
 		return err
 	}
-	d.uncompressStream = uncompressStream
-	_, err = io.Copy(w, uncompressStream)
+	d.decompressedFileReader = decompressedFileReader
+	_, err = io.Copy(w, decompressedFileReader)
 	return err
 }
@@ -48,7 +59,10 @@ func (d *genericDecompressor) close() {
 	if err := d.compressedFile.Close(); err != nil {
 		logrus.Errorf("Unable to close compressed file: %q", err)
 	}
-	if err := d.uncompressStream.Close(); err != nil {
-		logrus.Errorf("Unable to close uncompressed stream: %q", err)
+
+	if d.decompressedFileReader != nil {
+		if err := d.decompressedFileReader.Close(); err != nil {
+			logrus.Errorf("Unable to close uncompressed stream: %q", err)
+		}
 	}
 }
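The genericDecompressor now owns the open/stat/close boilerplate and the default AutoDecompress path, so a concrete decompressor only embeds it and overrides decompress(). As a hypothetical illustration (bzip2 support is not part of this commit), a new format would reduce to:

package compression

import (
	"compress/bzip2"
	"io"
)

// Hypothetical example: embedding genericDecompressor supplies
// compressedFileSize, compressedFileMode, compressedFileReader and
// close(); only the stream transformation is format-specific.
type bzip2Decompressor struct {
	genericDecompressor
}

func newBzip2Decompressor(compressedFilePath string) (*bzip2Decompressor, error) {
	d, err := newGenericDecompressor(compressedFilePath)
	return &bzip2Decompressor{*d}, err
}

func (*bzip2Decompressor) decompress(w io.WriteSeeker, r io.Reader) error {
	_, err := io.Copy(w, bzip2.NewReader(r))
	return err
}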

View File

@@ -3,54 +3,34 @@ package compression
 import (
 	"compress/gzip"
 	"io"
-	"os"
 
 	crcOs "github.com/crc-org/crc/v2/pkg/os"
 	"github.com/sirupsen/logrus"
 )
 
-type gzDecompressor struct {
-	compressedFilePath string
-	compressedFile     *os.File
-	gzReader           *gzip.Reader
+type gzipDecompressor struct {
+	genericDecompressor
+	gzReader io.ReadCloser
 }
 
-func newGzipDecompressor(compressedFilePath string) decompressor {
-	return &gzDecompressor{
-		compressedFilePath: compressedFilePath,
-	}
+func newGzipDecompressor(compressedFilePath string) (*gzipDecompressor, error) {
+	d, err := newGenericDecompressor(compressedFilePath)
+	return &gzipDecompressor{*d, nil}, err
 }
 
-func (d *gzDecompressor) srcFilePath() string {
-	return d.compressedFilePath
-}
-
-func (d *gzDecompressor) reader() (io.Reader, error) {
-	srcFile, err := os.Open(d.compressedFilePath)
+func (d *gzipDecompressor) decompress(w io.WriteSeeker, r io.Reader) error {
+	gzReader, err := gzip.NewReader(r)
 	if err != nil {
-		return nil, err
-	}
-	d.compressedFile = srcFile
-
-	gzReader, err := gzip.NewReader(srcFile)
-	if err != nil {
-		return gzReader, err
+		return err
 	}
 	d.gzReader = gzReader
-
-	return gzReader, nil
-}
-
-func (*gzDecompressor) copy(w *os.File, r io.Reader) error {
-	_, err := crcOs.CopySparse(w, r)
+	_, err = crcOs.CopySparse(w, gzReader)
 	return err
 }
 
-func (d *gzDecompressor) close() {
-	if err := d.compressedFile.Close(); err != nil {
-		logrus.Errorf("Unable to close gz file: %q", err)
-	}
+func (d *gzipDecompressor) close() {
 	if err := d.gzReader.Close(); err != nil {
 		logrus.Errorf("Unable to close gz file: %q", err)
 	}
+	d.genericDecompressor.close()
 }
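The gzip path matters on macOS because the VM images are sparse, so decompress() writes through crc-org's os.CopySparse rather than io.Copy; this is also why the interface takes an io.WriteSeeker instead of a plain writer. Below is a simplified sketch of how such a sparse copy typically works, an assumption for illustration, not the crc-org implementation:

package compression

import (
	"bytes"
	"io"
)

// copySparseSketch skips blocks that are entirely zero by seeking past
// them instead of writing, so the destination keeps holes where the
// decompressed image has them. A real implementation also has to set
// the final file size (e.g. via Truncate) when the data ends in a hole.
func copySparseSketch(w io.WriteSeeker, r io.Reader) (int64, error) {
	const blockSize = 64 * 1024
	buf := make([]byte, blockSize)
	zeroes := make([]byte, blockSize)
	var written int64

	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			if bytes.Equal(buf[:n], zeroes[:n]) {
				// Hole: advance the write offset without writing.
				if _, serr := w.Seek(int64(n), io.SeekCurrent); serr != nil {
					return written, serr
				}
			} else {
				wn, werr := w.Write(buf[:n])
				written += int64(wn)
				if werr != nil {
					return written, werr
				}
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return written, nil
		}
		if err != nil {
			return written, err
		}
	}
}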

View File

@@ -2,44 +2,20 @@ package compression
 
 import (
 	"io"
-	"os"
 
 	crcOs "github.com/crc-org/crc/v2/pkg/os"
-	"github.com/sirupsen/logrus"
 )
 
 type uncompressedDecompressor struct {
-	compressedFilePath string
-	compressedFile     *os.File
+	genericDecompressor
 }
 
-func newUncompressedDecompressor(compressedFilePath string) decompressor {
-	return &uncompressedDecompressor{
-		compressedFilePath: compressedFilePath,
-	}
+func newUncompressedDecompressor(compressedFilePath string) (*uncompressedDecompressor, error) {
+	d, err := newGenericDecompressor(compressedFilePath)
+	return &uncompressedDecompressor{*d}, err
 }
 
-func (d *uncompressedDecompressor) srcFilePath() string {
-	return d.compressedFilePath
-}
-
-func (d *uncompressedDecompressor) reader() (io.Reader, error) {
-	srcFile, err := os.Open(d.compressedFilePath)
-	if err != nil {
-		return nil, err
-	}
-	d.compressedFile = srcFile
-	return srcFile, nil
-}
-
-func (*uncompressedDecompressor) copy(w *os.File, r io.Reader) error {
+func (*uncompressedDecompressor) decompress(w io.WriteSeeker, r io.Reader) error {
 	_, err := crcOs.CopySparse(w, r)
 	return err
 }
-
-func (d *uncompressedDecompressor) close() {
-	if err := d.compressedFile.Close(); err != nil {
-		logrus.Errorf("Unable to close gz file: %q", err)
-	}
-}

View File

@@ -11,33 +11,18 @@ import (
 )
 
 type xzDecompressor struct {
-	compressedFilePath string
-	compressedFile     *os.File
+	genericDecompressor
 }
 
-func newXzDecompressor(compressedFilePath string) decompressor {
-	return &xzDecompressor{
-		compressedFilePath: compressedFilePath,
-	}
-}
-
-func (d *xzDecompressor) srcFilePath() string {
-	return d.compressedFilePath
-}
-
-func (d *xzDecompressor) reader() (io.Reader, error) {
-	srcFile, err := os.Open(d.compressedFilePath)
-	if err != nil {
-		return nil, err
-	}
-	d.compressedFile = srcFile
-	return srcFile, nil
+func newXzDecompressor(compressedFilePath string) (*xzDecompressor, error) {
+	d, err := newGenericDecompressor(compressedFilePath)
+	return &xzDecompressor{*d}, err
 }
 
 // Will error out if file without .Xz already exists
 // Maybe extracting then renaming is a good idea here..
 // depends on Xz: not pre-installed on mac, so it becomes a brew dependency
-func (*xzDecompressor) copy(w *os.File, r io.Reader) error {
+func (*xzDecompressor) decompress(w io.WriteSeeker, r io.Reader) error {
 	var cmd *exec.Cmd
 	var read io.Reader
@@ -79,9 +64,3 @@ func (*xzDecompressor) copy(w *os.File, r io.Reader) error {
 	<-done
 	return nil
 }
-
-func (d *xzDecompressor) close() {
-	if err := d.compressedFile.Close(); err != nil {
-		logrus.Errorf("Unable to close xz file: %q", err)
-	}
-}
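As the comments note, the xz decompressor shells out to the xz binary (a brew dependency on macOS) instead of decompressing in-process. A minimal sketch of that approach, with illustrative flags that are not necessarily the exact command line used here:

package main

import (
	"io"
	"os"
	"os/exec"
)

// xzcat streams compressed data into the external xz binary and copies
// the decompressed output from its stdout.
func xzcat(w io.Writer, r io.Reader) error {
	cmd := exec.Command("xz", "--decompress", "--stdout")
	cmd.Stdin = r
	cmd.Stderr = os.Stderr

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	if _, err := io.Copy(w, stdout); err != nil {
		_ = cmd.Wait()
		return err
	}
	return cmd.Wait()
}

func main() {
	if err := xzcat(os.Stdout, os.Stdin); err != nil {
		panic(err)
	}
}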

View File

@@ -4,28 +4,26 @@ import (
 	"archive/zip"
 	"errors"
 	"io"
-	"os"
 
 	"github.com/sirupsen/logrus"
 )
 
 type zipDecompressor struct {
-	compressedFilePath string
+	genericDecompressor
 	zipReader  *zip.ReadCloser
 	fileReader io.ReadCloser
 }
 
-func newZipDecompressor(compressedFilePath string) decompressor {
-	return &zipDecompressor{
-		compressedFilePath: compressedFilePath,
-	}
+func newZipDecompressor(compressedFilePath string) (*zipDecompressor, error) {
+	d, err := newGenericDecompressor(compressedFilePath)
+	return &zipDecompressor{*d, nil, nil}, err
 }
 
-func (d *zipDecompressor) srcFilePath() string {
-	return d.compressedFilePath
-}
-
-func (d *zipDecompressor) reader() (io.Reader, error) {
+// This is the only compressor that doesn't return the compressed file
+// stream (zip.OpenReader() provides access to the decompressed file).
+// As a result the progress bar shows the decompressed file stream
+// but the final size is the compressed file size.
+func (d *zipDecompressor) compressedFileReader() (io.ReadCloser, error) {
 	zipReader, err := zip.OpenReader(d.compressedFilePath)
 	if err != nil {
 		return nil, err
@@ -42,7 +40,7 @@ func (d *zipDecompressor) reader() (io.Reader, error) {
 	return z, nil
 }
 
-func (*zipDecompressor) copy(w *os.File, r io.Reader) error {
+func (*zipDecompressor) decompress(w io.WriteSeeker, r io.Reader) error {
 	_, err := io.Copy(w, r)
 	return err
 }
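As the comment on compressedFileReader() explains, zip is the odd one out: archive/zip only exposes the already-decompressed entry stream, so the proxied reader feeds decompressed bytes to a bar whose total is the compressed file size. A minimal sketch of reading the single entry out of such an archive (the file name and single-entry check are illustrative, not taken from this commit):

package main

import (
	"archive/zip"
	"errors"
	"fmt"
	"io"
)

// firstZipEntry opens a zip archive and returns a reader for its single
// entry; the returned stream is already decompressed by archive/zip.
func firstZipEntry(path string) (io.ReadCloser, *zip.ReadCloser, error) {
	zr, err := zip.OpenReader(path)
	if err != nil {
		return nil, nil, err
	}
	if len(zr.File) != 1 {
		zr.Close()
		return nil, nil, errors.New("expected a single file in the zip archive")
	}
	f, err := zr.File[0].Open()
	if err != nil {
		zr.Close()
		return nil, nil, err
	}
	return f, zr, nil
}

func main() {
	f, zr, err := firstZipEntry("disk.img.zip") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	defer f.Close()

	n, err := io.Copy(io.Discard, f)
	if err != nil {
		panic(err)
	}
	fmt.Println("decompressed bytes:", n)
}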