mirror of
https://github.com/containers/podman.git
synced 2025-05-20 16:47:39 +08:00

Allow podman machine to extract its disk image from an OCI registry or a local OCI directory.

For now, the image must be relatively inflexible: it must have one layer, and that layer must contain exactly one disk image. So a Dockerfile like:

    FROM scratch
    COPY ./myimage.xz /myimage.xz

When using an OCI dir, the directory structure must adhere to the typical layout of an OCI image (with one layer):

    ├── blobs
    │   └── sha256
    │       ├── 53735773573b3853bb1cae16dd21061beb416239ceb78d4ef1f2a0609f7e843b
    │       ├── 80577866ec13c041693e17de61444b4696137623803c3d87f92e4f28a1f4e87b
    │       └── af57637ac1ab12f833e3cfa886027cc9834a755a437d0e1cf48b5d4778af7a4e
    ├── index.json
    └── oci-layout

To identify this new kind of input, a transport/scheme is used to differentiate it from the current `podman machine init --image-path` behavior. We will support `oci-dir://` and `docker://` as transports. When using the docker transport, only an empty reference is accepted for now, for example `podman machine init --image-path docker://`; a fully qualified image name will be supported in the next iteration.

A bare transport with nothing after it means: pull the default FCOS image stored in a registry. Podman determines its current version and then looks for the correlating manifest. In this default case it would look for quay.io/libpod/podman-machine-images:<version>. That manifest then points to the specific images that contain the disk image for the correct arch and provider, e.g. quay.io/libpod/podman-machine-images:4.6-qcow2.

This PR does not yet enable something like docker://quay.io/mycorp/myimage:latest. Names, addresses, and schemes/transports are all subject to change; the plan is to keep this undocumented until things firm up.

[NO NEW TESTS NEEDED]

Signed-off-by: Brent Baude <bbaude@redhat.com>
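As a rough sketch of the transport handling described above (illustrative only: the classification function, its messages, and the sample paths are assumptions for this example, not code from this PR), the three input forms would be distinguished roughly like this:

package main

import (
	"fmt"
	"net/url"
)

// classify mirrors, in simplified form, how the commit message describes the
// different kinds of --image-path input being told apart.
func classify(input string) string {
	u, err := url.Parse(input)
	if err != nil || u.Scheme == "" {
		return "local file path or http(s) URL (existing behavior)"
	}
	switch u.Scheme {
	case "oci-dir":
		return "local OCI directory layout"
	case "docker":
		if u.Host == "" && u.Path == "" {
			// Bare transport: pull the default image, e.g.
			// quay.io/libpod/podman-machine-images:<version>.
			return "default machine image pulled from the registry"
		}
		return "fully qualified image name (next iteration)"
	default:
		return "local file path or http(s) URL (existing behavior)"
	}
}

func main() {
	for _, in := range []string{"docker://", "oci-dir:///var/tmp/machine-oci", "/tmp/myimage.qcow2.xz"} {
		fmt.Printf("%-35s -> %s\n", in, classify(in))
	}
}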
485 lines
12 KiB
Go
//go:build amd64 || arm64
// +build amd64 arm64

package machine

import (
	"archive/zip"
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	url2 "net/url"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/storage/pkg/archive"
	"github.com/sirupsen/logrus"
	"github.com/ulikunitz/xz"
	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

// GenericDownload is used when a user provides a URL
// or path for an image
type GenericDownload struct {
	Download
}

// NewGenericDownloader is used when the disk image is provided by the user
func NewGenericDownloader(vmType VMType, vmName, pullPath string) (DistributionDownload, error) {
	var (
		imageName string
	)
	dataDir, err := GetDataDir(vmType)
	if err != nil {
		return nil, err
	}
	cacheDir, err := GetCacheDir(vmType)
	if err != nil {
		return nil, err
	}
	dl := Download{}
	// Is pullpath a file or url?
	if getURL := supportedURL(pullPath); getURL != nil {
		urlSplit := strings.Split(getURL.Path, "/")
		imageName = urlSplit[len(urlSplit)-1]
		dl.URL = getURL
		dl.LocalPath = filepath.Join(cacheDir, imageName)
	} else {
		// Dealing with FilePath
		imageName = filepath.Base(pullPath)
		dl.LocalPath = pullPath
	}
	dl.VMName = vmName
	dl.ImageName = imageName
	dl.LocalUncompressedFile = dl.GetLocalUncompressedFile(dataDir)
	// The download needs to be pulled into the datadir

	gd := GenericDownload{Download: dl}
	return gd, nil
}

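// supportedURL returns the parsed URL when the pull path uses an http or
// https scheme; otherwise it returns nil so the value is treated as a local
// file path.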
func supportedURL(path string) (url *url2.URL) {
	getURL, err := url2.Parse(path)
	if err != nil {
		// ignore error, probably not a URL, fallback & treat as file path
		return nil
	}

	// Check supported scheme. Since URL is passed to net.http, only http
	// schemes are supported. Also, windows drive paths can resemble a
	// URL, but with a single letter scheme. These values should be
	// passed through for interpretation as a file path.
	switch getURL.Scheme {
	case "http":
		fallthrough
	case "https":
		return getURL
	default:
		return nil
	}
}

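// GetLocalUncompressedFile returns the path under dataDir where the
// uncompressed image for this download will be written, derived from the VM
// and image names.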
func (dl Download) GetLocalUncompressedFile(dataDir string) string {
	compressedFilename := dl.VMName + "_" + dl.ImageName
	extension := compressionFromFile(compressedFilename)
	uncompressedFile := strings.TrimSuffix(compressedFilename, fmt.Sprintf(".%s", extension.String()))
	dl.LocalUncompressedFile = filepath.Join(dataDir, uncompressedFile)
	return dl.LocalUncompressedFile
}

func (g GenericDownload) Get() *Download {
	return &g.Download
}

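// HasUsableCache reports whether the download can be served from what is
// already on disk; anything backed by a URL is always pulled fresh.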
func (g GenericDownload) HasUsableCache() (bool, error) {
	// If we have a URL for this "downloader", we now pull it
	return g.URL == nil, nil
}

// CleanCache cleans out downloaded uncompressed image files
func (g GenericDownload) CleanCache() error {
	// Remove any image that has been downloaded via URL
	// We never read from cache for generic downloads
	if g.URL != nil {
		if err := os.Remove(g.LocalPath); err != nil && !errors.Is(err, os.ErrNotExist) {
			return err
		}
	}
	return nil
}

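// DownloadImage fetches the image if no usable cached copy exists and then
// decompresses it to the download's uncompressed file path.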
func DownloadImage(d DistributionDownload) error {
	// check if the latest image is already present
	ok, err := d.HasUsableCache()
	if err != nil {
		return err
	}
	if !ok {
		if err := DownloadVMImage(d.Get().URL, d.Get().ImageName, d.Get().LocalPath); err != nil {
			return err
		}
		// Clean out old cached images, since we didn't find needed image in cache
		defer func() {
			if err = d.CleanCache(); err != nil {
				logrus.Warnf("error cleaning machine image cache: %s", err)
			}
		}()
	}
	localPath, err := NewMachineFile(d.Get().LocalPath, nil)
	if err != nil {
		return err
	}
	return Decompress(localPath, d.Get().LocalUncompressedFile)
}

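// progressBar returns an mpb progress container and a bar of the given size;
// the prefix decorator is replaced by onComplete when the bar finishes.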
func progressBar(prefix string, size int64, onComplete string) (*mpb.Progress, *mpb.Bar) {
	p := mpb.New(
		mpb.WithWidth(80), // Do not go below 80, see bug #17718
		mpb.WithRefreshRate(180*time.Millisecond),
	)

	bar := p.AddBar(size,
		mpb.BarFillerClearOnComplete(),
		mpb.PrependDecorators(
			decor.OnComplete(decor.Name(prefix), onComplete),
		),
		mpb.AppendDecorators(
			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
		),
	)
	if size == 0 {
		bar.SetTotal(0, true)
	}

	return p, bar
}

// DownloadVMImage downloads a VM image from url to given path
// with download status
func DownloadVMImage(downloadURL *url2.URL, imageName string, localImagePath string) error {
	out, err := os.Create(localImagePath)
	if err != nil {
		return err
	}
	defer func() {
		if err := out.Close(); err != nil {
			logrus.Error(err)
		}
	}()

	resp, err := http.Get(downloadURL.String())
	if err != nil {
		return err
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			logrus.Error(err)
		}
	}()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("downloading VM image %s: %s", downloadURL, resp.Status)
	}
	size := resp.ContentLength
	prefix := "Downloading VM image: " + imageName
	onComplete := prefix + ": done"

	p, bar := progressBar(prefix, size, onComplete)

	proxyReader := bar.ProxyReader(resp.Body)
	defer func() {
		if err := proxyReader.Close(); err != nil {
			logrus.Error(err)
		}
	}()

	if _, err := io.Copy(out, proxyReader); err != nil {
		return err
	}

	p.Wait()
	return nil
}

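// Decompress detects the compression format of localPath and writes the
// uncompressed image to uncompressedPath, picking a decompressor based on
// file type and platform.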
func Decompress(localPath *VMFile, uncompressedPath string) error {
	var isZip bool
	uncompressedFileWriter, err := os.OpenFile(uncompressedPath, os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		return err
	}
	sourceFile, err := localPath.Read()
	if err != nil {
		return err
	}
	if strings.HasSuffix(localPath.GetPath(), ".zip") {
		isZip = true
	}
	prefix := "Copying uncompressed file"
	compressionType := archive.DetectCompression(sourceFile)
	if compressionType != archive.Uncompressed || isZip {
		prefix = "Extracting compressed file"
	}
	prefix += ": " + filepath.Base(uncompressedPath)
	if compressionType == archive.Xz {
		return decompressXZ(prefix, localPath.GetPath(), uncompressedFileWriter)
	}
	if isZip && runtime.GOOS == "windows" {
		return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter)
	}
	return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter)
}

// Will error out if file without .Xz already exists
// Maybe extracting then renaming is a good idea here..
// depends on Xz: not pre-installed on mac, so it becomes a brew dependency
func decompressXZ(prefix string, src string, output io.WriteCloser) error {
	var read io.Reader
	var cmd *exec.Cmd

	stat, err := os.Stat(src)
	if err != nil {
		return err
	}
	file, err := os.Open(src)
	if err != nil {
		return err
	}
	defer file.Close()

	p, bar := progressBar(prefix, stat.Size(), prefix+": done")
	proxyReader := bar.ProxyReader(file)
	defer func() {
		if err := proxyReader.Close(); err != nil {
			logrus.Error(err)
		}
	}()

	// Prefer Xz utils for fastest performance, fallback to go xi2 impl
	if _, err := exec.LookPath("xz"); err == nil {
		cmd = exec.Command("xz", "-d", "-c")
		cmd.Stdin = proxyReader
		read, err = cmd.StdoutPipe()
		if err != nil {
			return err
		}
		cmd.Stderr = os.Stderr
	} else {
		// This XZ implementation is reliant on buffering. It is also 3x+ slower than XZ utils.
		// Consider replacing with a faster implementation (e.g. xi2) if podman machine is
		// updated with a larger image for the distribution base.
		buf := bufio.NewReader(proxyReader)
		read, err = xz.NewReader(buf)
		if err != nil {
			return err
		}
	}

	done := make(chan bool)
	go func() {
		if _, err := io.Copy(output, read); err != nil {
			logrus.Error(err)
		}
		output.Close()
		done <- true
	}()

	if cmd != nil {
		err := cmd.Start()
		if err != nil {
			return err
		}
		p.Wait()
		return cmd.Wait()
	}
	<-done
	p.Wait()
	return nil
}

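// decompressEverythingElse handles the remaining formats by letting
// containers/image auto-detect the compression and decompress the stream.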
func decompressEverythingElse(prefix string, src string, output io.WriteCloser) error {
	stat, err := os.Stat(src)
	if err != nil {
		return err
	}
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	p, bar := progressBar(prefix, stat.Size(), prefix+": done")
	proxyReader := bar.ProxyReader(f)
	defer func() {
		if err := proxyReader.Close(); err != nil {
			logrus.Error(err)
		}
	}()
	uncompressStream, _, err := compression.AutoDecompress(proxyReader)
	if err != nil {
		return err
	}
	defer func() {
		if err := uncompressStream.Close(); err != nil {
			logrus.Error(err)
		}
		if err := output.Close(); err != nil {
			logrus.Error(err)
		}
	}()

	_, err = io.Copy(output, uncompressStream)
	p.Wait()
	return err
}

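// decompressZip extracts a zip archive that must contain exactly one file;
// it is used for zipped machine images on Windows.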
func decompressZip(prefix string, src string, output io.WriteCloser) error {
	zipReader, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	if len(zipReader.File) != 1 {
		return errors.New("machine image files should consist of a single compressed file")
	}
	f, err := zipReader.File[0].Open()
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			logrus.Error(err)
		}
	}()
	defer func() {
		if err := output.Close(); err != nil {
			logrus.Error(err)
		}
	}()
	size := int64(zipReader.File[0].CompressedSize64)
	p, bar := progressBar(prefix, size, prefix+": done")
	proxyReader := bar.ProxyReader(f)
	defer func() {
		if err := proxyReader.Close(); err != nil {
			logrus.Error(err)
		}
	}()
	_, err = io.Copy(output, proxyReader)
	p.Wait()
	return err
}

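// RemoveImageAfterExpire walks dir and removes any cached file whose
// modification time is older than expire.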
func RemoveImageAfterExpire(dir string, expire time.Duration) error {
	now := time.Now()
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		// Delete any cache files that are older than expiry date
		if !info.IsDir() && (now.Sub(info.ModTime()) > expire) {
			err := os.Remove(path)
			if err != nil && !errors.Is(err, os.ErrNotExist) {
				logrus.Warnf("unable to clean up cached image: %s", path)
			} else {
				logrus.Debugf("cleaning up cached image: %s", path)
			}
		}
		return nil
	})
	return err
}

// AcquireAlternateImage downloads the alternate image the user provided, which
// can be a file path or URL
func (dl Download) AcquireAlternateImage(inputPath string) (*VMFile, error) {
	g, err := NewGenericDownloader(dl.VMKind, dl.VMName, inputPath)
	if err != nil {
		return nil, err
	}

	imagePath, err := NewMachineFile(g.Get().LocalUncompressedFile, nil)
	if err != nil {
		return nil, err
	}

	if err := DownloadImage(g); err != nil {
		return nil, err
	}

	return imagePath, nil
}

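// isOci reports whether the input uses one of the supported OCI transports
// and, if so, which one.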
func isOci(input string) (bool, *OCIKind, error) {
	inputURL, err := url2.Parse(input)
	if err != nil {
		return false, nil, err
	}
	switch inputURL.Scheme {
	case OCIDir.String():
		return true, &OCIDir, nil
	case OCIRegistry.String():
		return true, &OCIRegistry, nil
	}
	return false, nil, nil
}

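// Pull acquires the disk image referenced by input for the given machine.
// Non-OCI inputs follow the existing provider download path; OCI inputs are
// pulled, unpacked, and decompressed into the data directory.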
func Pull(input, machineName string, vp VirtProvider) (*VMFile, FCOSStream, error) {
	var (
		disk Disker
	)

	ociBased, ociScheme, err := isOci(input)
	if err != nil {
		return nil, 0, err
	}
	if !ociBased {
		// Business as usual
		dl, err := vp.NewDownload(machineName)
		if err != nil {
			return nil, 0, err
		}
		return dl.AcquireVMImage(input)
	}
	oopts := OCIOpts{
		Scheme: ociScheme,
	}
	dataDir, err := GetDataDir(vp.VMType())
	if err != nil {
		return nil, 0, err
	}
	if ociScheme.IsOCIDir() {
		strippedOCIDir := StripOCIReference(input)
		oopts.Dir = &strippedOCIDir
		disk = NewOCIDir(context.Background(), input, dataDir, machineName)
	} else {
		// a use of a containers image type here might be
		// tighter
		strippedInput := strings.TrimPrefix(input, "docker://")
		// this is the next piece of work
		if len(strippedInput) > 0 {
			return nil, 0, errors.New("image names are not supported yet")
		}
		disk, err = newVersioned(context.Background(), dataDir, machineName)
		if err != nil {
			return nil, 0, err
		}
	}
	if err := disk.Pull(); err != nil {
		return nil, 0, err
	}
	unpacked, err := disk.Unpack()
	if err != nil {
		return nil, 0, err
	}
	defer func() {
		logrus.Debugf("cleaning up %q", unpacked.GetPath())
		if err := unpacked.Delete(); err != nil {
			logrus.Errorf("unable to delete local compressed file %q:%v", unpacked.GetPath(), err)
		}
	}()
	imagePath, err := disk.Decompress(unpacked)
	return imagePath, UnknownStream, err
}