Merge pull request from baude/ociartdisk

Allow podman machine to download from an OCI registry
openshift-merge-bot[bot], 2024-02-13 12:51:28 +00:00, committed by GitHub
12 changed files with 471 additions and 237 deletions

@@ -181,15 +181,21 @@ func GetMachineDirs(vmType define.VMType) (*define.MachineDirs, error) {
return nil, err
}
imageCacheDir, err := dataDirFile.AppendToNewVMFile("cache", nil)
if err != nil {
return nil, err
}
rtDirFile, err := define.NewMachineFile(rtDir, nil)
if err != nil {
return nil, err
}
dirs := define.MachineDirs{
ConfigDir: configDirFile,
DataDir: dataDirFile,
RuntimeDir: rtDirFile,
ConfigDir: configDirFile,
DataDir: dataDirFile,
ImageCacheDir: imageCacheDir,
RuntimeDir: rtDirFile,
}
// make sure all machine dirs are present
@@ -199,7 +205,10 @@ func GetMachineDirs(vmType define.VMType) (*define.MachineDirs, error) {
if err := os.MkdirAll(configDir, 0755); err != nil {
return nil, err
}
err = os.MkdirAll(dataDir, 0755)
// Because this is a MkdirAll, creating the image cache dir
// (a subdir of the data dir) also creates the data dir itself
err = os.MkdirAll(imageCacheDir.GetPath(), 0755)
return &dirs, err
}
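Illustrative sketch (not part of the diff): os.MkdirAll creates every missing parent directory, so a single call on the image cache dir also creates the data dir above it, which is why the separate MkdirAll for dataDir can be dropped. The paths here are hypothetical:

	// one MkdirAll creates "data" and "data/cache" in one shot
	dataDir := filepath.Join(os.TempDir(), "machine-demo", "data")
	imageCacheDir := filepath.Join(dataDir, "cache")
	if err := os.MkdirAll(imageCacheDir, 0755); err != nil {
		return nil, err
	}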

@@ -17,7 +17,8 @@ type CreateVMOpts struct {
}
type MachineDirs struct {
ConfigDir *VMFile
DataDir *VMFile
RuntimeDir *VMFile
ConfigDir *VMFile
DataDir *VMFile
ImageCacheDir *VMFile
RuntimeDir *VMFile
}

@@ -36,6 +36,18 @@ func (v VMType) String() string {
return qemu
}
func (v VMType) ImageFormat() ImageFormat {
switch v {
case WSLVirt:
return Tar
case AppleHvVirt:
return Raw
case HyperVVirt:
return Vhdx
}
return Qcow
}
func ParseVMType(input string, emptyFallback VMType) (VMType, error) {
switch strings.TrimSpace(strings.ToLower(input)) {
case qemu:

@@ -56,3 +56,39 @@ func TestParseVMType(t *testing.T) {
})
}
}
func TestVMType_ImageFormat(t *testing.T) {
tests := []struct {
name string
v VMType
want ImageFormat
}{
{
name: "wsl",
v: WSLVirt,
want: Tar,
},
{
name: "applehv",
v: AppleHvVirt,
want: Raw,
},
{
name: "qemu",
v: QemuVirt,
want: Qcow,
},
{
name: "hyperv",
v: HyperVVirt,
want: Vhdx,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.v.ImageFormat(); got != tt.want {
t.Errorf("ImageFormat() = %v, want %v", got, tt.want)
}
})
}
}

@@ -15,14 +15,6 @@ import (
"github.com/sirupsen/logrus"
)
// quay.io/libpod/podman-machine-images:4.6
const (
diskImages = "podman-machine-images"
registry = "quay.io"
repo = "libpod"
)
type OSVersion struct {
*semver.Version
}
@@ -71,10 +63,6 @@ func (o *OSVersion) majorMinor() string {
return fmt.Sprintf("%d.%d", o.Major, o.Minor)
}
func (o *OSVersion) diskImage(vmType string) string {
return fmt.Sprintf("%s/%s/%s:%s-%s", registry, repo, diskImages, o.majorMinor(), vmType)
}
func unpackOCIDir(ociTb, machineImageDir string) (*define.VMFile, error) {
imageFileName, err := findTarComponent(ociTb)
if err != nil {

@@ -0,0 +1,267 @@
package ocipull
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v5/pkg/machine/compression"
"github.com/containers/podman/v5/pkg/machine/define"
"github.com/containers/podman/v5/utils"
crc "github.com/crc-org/crc/v2/pkg/os"
"github.com/opencontainers/go-digest"
specV1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
const (
// TODO This is temporary until we decide on a proper image name
artifactRegistry = "quay.io"
artifactRepo = "baude"
artifactImageName = "podman-machine-images-art"
artifactOriginalName = "org.opencontainers.image.title"
)
type OCIArtifactDisk struct {
cachedCompressedDiskPath *define.VMFile
name string
ctx context.Context
dirs *define.MachineDirs
diskArtifactOpts *DiskArtifactOpts
finalPath string
imageEndpoint string
machineVersion *OSVersion
diskArtifactFileName string
pullOptions *PullOptions
vmType define.VMType
}
type DiskArtifactOpts struct {
arch string
diskType string
os string
}
/*
This interface automatically pulls a disk artifact (qcow2, raw, or vhdx file) from a predetermined
image location. The logic is tied to VM types (applehv, qemu, hyperv) and the kind of disk each
requires. The process can generally be described as:
* Determine the flavor of artifact we are looking for (arch, compression, type)
* Grab the manifest list for the target
* Walk the artifacts to find a match based on that flavor
* Check the hash of the artifact against the hash of our cached image
* If the cached image does not exist or does not match, pull the latest into an OCI directory
* Read the OCI blob's manifest to determine which blob is the artifact disk
* Rename/move the blob in the OCI directory to the image cache dir and append the type and compression,
i.e. 91d1e51ddfac9d4afb1f96df878089cfdb9ab9be5886f8bccac0f0557ed28974.qcow2.xz
* Discard the OCI directory
* Decompress the cached image into the image dir in the form of <vmname>-<arch>.<raw|vhdx|qcow2>
*/
func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, vmName string, vmType define.VMType, finalPath *define.VMFile) (*OCIArtifactDisk, error) {
var (
arch string
)
artifactVersion := getVersion()
switch runtime.GOARCH {
case "amd64":
arch = "x86_64"
case "arm64":
arch = "aarch64"
default:
return nil, fmt.Errorf("unsupported machine arch: %s", runtime.GOARCH)
}
diskOpts := DiskArtifactOpts{
arch: arch,
diskType: vmType.String(),
os: runtime.GOOS,
}
ociDisk := OCIArtifactDisk{
ctx: ctx,
dirs: dirs,
diskArtifactOpts: &diskOpts,
finalPath: finalPath.GetPath(),
imageEndpoint: fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, artifactImageName, artifactVersion.majorMinor()),
machineVersion: artifactVersion,
name: vmName,
pullOptions: &PullOptions{},
vmType: vmType,
}
return &ociDisk, nil
}
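Hypothetical usage sketch (not part of the diff), mirroring how GetDisk wires this up later in the commit; dirs, vmType, and imagePath are assumed to come from GetMachineDirs and the machine configuration:

	// pullMachineDisk is an illustrative helper, not a function in this commit
	func pullMachineDisk(dirs *define.MachineDirs, vmType define.VMType, imagePath *define.VMFile) error {
		disk, err := ocipull.NewOCIArtifactPull(context.Background(), dirs, "podman-machine-default", vmType, imagePath)
		if err != nil {
			return err
		}
		// Get() checks the image cache, pulls and unpacks when needed, and
		// finally decompresses the disk into imagePath
		return disk.Get()
	}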
func (o *OCIArtifactDisk) Get() error {
destRef, artifactDigest, err := o.getDestArtifact()
if err != nil {
return err
}
// Note: the artifactDigest here is the hash of the most recent disk image available
cachedImagePath, err := o.dirs.ImageCacheDir.AppendToNewVMFile(fmt.Sprintf("%s.%s", artifactDigest.Encoded(), o.vmType.ImageFormat().KindWithCompression()), nil)
if err != nil {
return err
}
// check if we have the latest and greatest disk image
if _, err = os.Stat(cachedImagePath.GetPath()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("unable to access cached image path %q: %q", cachedImagePath.GetPath(), err)
}
// pull the image down to our local filesystem
if err := o.pull(destRef, artifactDigest); err != nil {
return err
}
// grab the artifact disk out of the pulled OCI directory and lay
// it into our image cache in the format of
// hash + disktype + compression
//
// the cached copy is reused until it is "outdated"
//
// i.e. 91d1e51...d28974.qcow2.xz
if err := o.unpack(artifactDigest); err != nil {
return err
}
} else {
logrus.Debugf("cached image exists and is latest: %s", cachedImagePath.GetPath())
o.cachedCompressedDiskPath = cachedImagePath
}
return o.decompress()
}
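For reference (not part of the diff), the cache key built above is the artifact digest plus the VM type's image kind and compression suffix, so a qemu machine probes the image cache for a name like the one below (digest shortened in the comment; the "qcow2.xz" suffix is what KindWithCompression() is assumed to produce for Qcow):

	// hypothetical digest of the newest remote disk artifact
	digestHex := "91d1e51ddfac9d4afb1f96df878089cfdb9ab9be5886f8bccac0f0557ed28974"
	// the file Get() stats under dirs.ImageCacheDir before deciding to pull
	cachedName := fmt.Sprintf("%s.%s", digestHex, "qcow2.xz") // 91d1e51...d28974.qcow2.xz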
func (o *OCIArtifactDisk) getDestArtifact() (types.ImageReference, digest.Digest, error) {
imgRef, err := alltransports.ParseImageName(o.imageEndpoint)
if err != nil {
return nil, "", err
}
sysCtx := &types.SystemContext{
DockerInsecureSkipTLSVerify: types.NewOptionalBool(!o.pullOptions.TLSVerify),
}
imgSrc, err := imgRef.NewImageSource(o.ctx, sysCtx)
if err != nil {
return nil, "", err
}
defer func() {
if err := imgSrc.Close(); err != nil {
logrus.Warn(err)
}
}()
diskArtifactDigest, err := GetDiskArtifactReference(o.ctx, imgSrc, o.diskArtifactOpts)
if err != nil {
return nil, "", err
}
// create a ref now and return
named := imgRef.DockerReference()
digestedRef, err := reference.WithDigest(reference.TrimNamed(named), diskArtifactDigest)
if err != nil {
return nil, "", err
}
// Get and "store" the original filename the disk artifact had
originalFileName, err := getOriginalFileName(o.ctx, imgSrc, diskArtifactDigest)
if err != nil {
return nil, "", err
}
o.diskArtifactFileName = originalFileName
newRef, err := docker.NewReference(digestedRef)
if err != nil {
return nil, "", err
}
return newRef, diskArtifactDigest, err
}
func (o *OCIArtifactDisk) pull(destRef types.ImageReference, artifactDigest digest.Digest) error {
destFileName := artifactDigest.Encoded()
destFile, err := o.dirs.ImageCacheDir.AppendToNewVMFile(destFileName, nil)
if err != nil {
return err
}
return Pull(o.ctx, destRef, destFile, o.pullOptions)
}
func (o *OCIArtifactDisk) unpack(diskArtifactHash digest.Digest) error {
finalSuffix := extractKindAndCompression(o.diskArtifactFileName)
blobDir, err := o.dirs.ImageCacheDir.AppendToNewVMFile(diskArtifactHash.Encoded(), nil)
if err != nil {
return err
}
cachedCompressedPath, err := o.dirs.ImageCacheDir.AppendToNewVMFile(diskArtifactHash.Encoded()+finalSuffix, nil)
if err != nil {
return err
}
o.cachedCompressedDiskPath = cachedCompressedPath
blobInfo, err := GetLocalBlob(o.ctx, blobDir.GetPath())
if err != nil {
return fmt.Errorf("unable to get local manifest for %s: %q", blobDir.GetPath(), err)
}
diskBlobPath := filepath.Join(blobDir.GetPath(), "blobs", "sha256", blobInfo.Digest.Encoded())
// Rename and move the hashed blob file to the cache dir.
// If the rename fails, we fall back to a sparse copy
if err := os.Rename(diskBlobPath, cachedCompressedPath.GetPath()); err != nil {
logrus.Errorf("renaming compressed image %q failed: %q", cachedCompressedPath.GetPath(), err)
logrus.Error("trying again using copy")
if err := crc.CopyFileSparse(diskBlobPath, cachedCompressedPath.GetPath()); err != nil {
return err
}
}
// Clean up the oci dir which is no longer needed
return utils.GuardedRemoveAll(blobDir.GetPath())
}
func (o *OCIArtifactDisk) decompress() error {
return compression.Decompress(o.cachedCompressedDiskPath, o.finalPath)
}
func getOriginalFileName(ctx context.Context, imgSrc types.ImageSource, artifactDigest digest.Digest) (string, error) {
v1RawMannyfest, _, err := imgSrc.GetManifest(ctx, &artifactDigest)
if err != nil {
return "", err
}
v1MannyFest := specV1.Manifest{}
if err := json.Unmarshal(v1RawMannyfest, &v1MannyFest); err != nil {
return "", err
}
if layerLen := len(v1MannyFest.Layers); layerLen > 1 {
return "", fmt.Errorf("podman-machine images should only have 1 layer: %d found", layerLen)
}
// podman-machine-images should have an original file name
// stored in the annotations under org.opencontainers.image.title
// i.e. fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz
originalFileName, ok := v1MannyFest.Layers[0].Annotations[artifactOriginalName]
if !ok {
return "", fmt.Errorf("unable to determine original artifact name: missing required annotation 'org.opencontainers.image.title'")
}
logrus.Debugf("original artifact file name: %s", originalFileName)
return originalFileName, nil
}
// extractKindAndCompression extracts the VM image type and the compression type.
// It is used when we rename the blob from its hash to something real,
// i.e. fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz would return ".qcow2.xz"
func extractKindAndCompression(name string) string {
compressAlgo := filepath.Ext(name)
compressStrippedName := strings.TrimSuffix(name, compressAlgo)
kind := filepath.Ext(compressStrippedName)
return kind + compressAlgo
}

@@ -0,0 +1,57 @@
package ocipull
import "testing"
func Test_extractKindAndCompression(t *testing.T) {
type args struct {
name string
}
tests := []struct {
name string
args args
want string
}{
{
name: "qcow2",
args: args{name: "foo.qcow2.xz"},
want: ".qcow2.xz",
},
{
name: "vhdx",
args: args{name: "foo.vhdx.zip"},
want: ".vhdx.zip",
},
{
name: "applehv",
args: args{name: "foo.raw.gz"},
want: ".raw.gz",
},
{
name: "lots of extensions with type and compression",
args: args{name: "foo.bar.homer.simpson.qcow2.xz"},
want: ".qcow2.xz",
},
{
name: "lots of extensions",
args: args{name: "foo.bar.homer.simpson"},
want: ".homer.simpson",
},
{
name: "no extensions",
args: args{name: "foobar"},
want: "",
},
{
name: "one extension",
args: args{name: "foobar.zip"},
want: ".zip",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := extractKindAndCompression(tt.args.name); got != tt.want {
t.Errorf("extractKindAndCompression() = %v, want %v", got, tt.want)
}
})
}
}

@@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v5/pkg/machine/define"
specV1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -27,16 +28,12 @@ type PullOptions struct {
}
// Pull `imageInput` from a container registry to `localDestPath`.
func Pull(ctx context.Context, imageInput string, sourcePath string, options PullOptions) error {
if _, err := os.Stat(sourcePath); err == nil {
return fmt.Errorf("%q already exists", sourcePath)
func Pull(ctx context.Context, imageInput types.ImageReference, localDestPath *define.VMFile, options *PullOptions) error {
if _, err := os.Stat(localDestPath.GetPath()); err == nil {
return fmt.Errorf("%q already exists", localDestPath.GetPath())
}
srcRef, err := stringToImageReference(imageInput)
if err != nil {
return err
}
destRef, err := layout.ParseReference(sourcePath)
destRef, err := layout.ParseReference(localDestPath.GetPath())
if err != nil {
return err
}
@@ -52,7 +49,7 @@ func Pull(ctx context.Context, imageInput string, sourcePath string, options Pul
sysCtx.DockerAuthConfig = authConf
}
if err := validateSourceImageReference(ctx, srcRef, sysCtx); err != nil {
if err := validateSourceImageReference(ctx, imageInput, sysCtx); err != nil {
return err
}
@@ -71,14 +68,14 @@ func Pull(ctx context.Context, imageInput string, sourcePath string, options Pul
if !options.Quiet {
copyOpts.ReportWriter = os.Stderr
}
if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copyOpts); err != nil {
if _, err := copy.Image(ctx, policyContext, destRef, imageInput, &copyOpts); err != nil {
return fmt.Errorf("pulling source image: %w", err)
}
return nil
}
func stringToImageReference(imageInput string) (types.ImageReference, error) {
func stringToImageReference(imageInput string) (types.ImageReference, error) { //nolint:unused
if shortnames.IsShortName(imageInput) {
return nil, fmt.Errorf("pulling source images by short name (%q) is not supported, please use a fully-qualified name", imageInput)
}
@@ -105,6 +102,5 @@ func validateSourceImageReference(ctx context.Context, ref types.ImageReference,
if ociManifest.Config.MediaType != specV1.MediaTypeImageConfig {
return fmt.Errorf("invalid media type of image config %q (expected: %q)", ociManifest.Config.MediaType, specV1.MediaTypeImageConfig)
}
return nil
}
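Hypothetical caller of the new Pull signature (not part of the diff); the image reference and file name are made up for illustration:

	func pullToCache(ctx context.Context, dirs *define.MachineDirs) error {
		// callers now parse the fully-qualified reference themselves
		srcRef, err := alltransports.ParseImageName("docker://quay.io/example/machine-disk:5.0")
		if err != nil {
			return err
		}
		// the destination is a *define.VMFile; Pull lays down an OCI layout there
		destFile, err := dirs.ImageCacheDir.AppendToNewVMFile("example-oci-dir", nil)
		if err != nil {
			return err
		}
		return ocipull.Pull(ctx, srcRef, destFile, &ocipull.PullOptions{TLSVerify: true})
	}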

@@ -8,10 +8,12 @@ import (
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
specV1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// readManifestFromImageSource reads the manifest from the specified image
@@ -25,31 +27,13 @@ func readManifestFromImageSource(ctx context.Context, src types.ImageSource) (*s
return nil, nil, -1, fmt.Errorf("image %q is of type %q (expected: %q)", strings.TrimPrefix(src.Reference().StringWithinTransport(), "//"), mimeType, specV1.MediaTypeImageManifest)
}
manifest := specV1.Manifest{}
if err := json.Unmarshal(rawData, &manifest); err != nil {
mannyFest := specV1.Manifest{}
if err := json.Unmarshal(rawData, &mannyFest); err != nil {
return nil, nil, -1, fmt.Errorf("reading manifest: %w", err)
}
manifestDigest := digest.FromBytes(rawData)
return &manifest, &manifestDigest, int64(len(rawData)), nil
}
// readManifestFromOCIPath returns the manifest of the specified source image
// at `sourcePath` along with its digest. The digest can later on be used to
// locate the manifest on the file system.
func readManifestFromOCIPath(ctx context.Context, sourcePath string) (*specV1.Manifest, *digest.Digest, int64, error) {
ociRef, err := layout.ParseReference(sourcePath)
if err != nil {
return nil, nil, -1, err
}
ociSource, err := ociRef.NewImageSource(ctx, &types.SystemContext{})
if err != nil {
return nil, nil, -1, err
}
defer ociSource.Close()
return readManifestFromImageSource(ctx, ociSource)
return &mannyFest, &manifestDigest, int64(len(rawData)), nil
}
func GetLocalBlob(ctx context.Context, path string) (*types.BlobInfo, error) {
@@ -82,7 +66,7 @@ func GetLocalBlob(ctx context.Context, path string) (*types.BlobInfo, error) {
return &blobs[0], nil
}
func GetRemoteManifest(ctx context.Context, dest string) (*specV1.Manifest, error) {
func GetRemoteManifest(ctx context.Context, dest string) (*specV1.Manifest, error) { //nolint:unused
ref, err := docker.ParseReference(fmt.Sprintf("//%s", dest))
if err != nil {
return nil, err
@@ -103,18 +87,73 @@ func GetRemoteManifest(ctx context.Context, dest string) (*specV1.Manifest, erro
return &remoteManifest, err
}
func GetRemoteDescriptor(ctx context.Context, dest string) (*specV1.Descriptor, error) {
remoteManifest, err := GetRemoteManifest(ctx, dest)
func GetDiskArtifactReference(ctx context.Context, imgSrc types.ImageSource, opts *DiskArtifactOpts) (digest.Digest, error) {
rawMannyFest, mannyType, err := imgSrc.GetManifest(ctx, nil)
if err != nil {
return nil, err
return "", err
}
if len(remoteManifest.Layers) != 1 {
return nil, errors.New("invalid remote disk image")
}
return &remoteManifest.Layers[0], nil
}
func ReadImageManifestFromOCIPath(ctx context.Context, ociImagePath string) (*specV1.Manifest, error) {
imageManifest, _, _, err := readManifestFromOCIPath(ctx, ociImagePath)
return imageManifest, err
if !manifest.MIMETypeIsMultiImage(mannyType) { // not a manifest list
return "", fmt.Errorf("wrong manifest type for disk artifact: %s", mannyType)
}
mannyFestList, err := manifest.ListFromBlob(rawMannyFest, mannyType)
if err != nil {
return "", fmt.Errorf("failed to parse manifest list from blob: %q", err)
}
var (
artifactDigest digest.Digest
)
for _, d := range mannyFestList.Instances() {
bar, err := mannyFestList.Instance(d)
if err != nil {
return "", err
}
val, ok := bar.ReadOnly.Annotations["disktype"]
if !ok { // quick exit, no type match
continue
}
// wrong arch
if bar.ReadOnly.Platform.Architecture != opts.arch {
continue
}
// wrong os
if bar.ReadOnly.Platform.OS != opts.os {
continue
}
// wrong disktype
if val != opts.diskType {
continue
}
// ok, we have a match
artifactDigest = d
logrus.Debugf("found image in digest: %q", artifactDigest.String())
break
}
if artifactDigest == "" {
return "", fmt.Errorf("no valid disk artifact found")
}
v1RawMannyfest, _, err := imgSrc.GetManifest(ctx, &artifactDigest)
if err != nil {
return "", err
}
v1MannyFest := specV1.Manifest{}
if err := json.Unmarshal(v1RawMannyfest, &v1MannyFest); err != nil {
return "", err
}
if layerLen := len(v1MannyFest.Layers); layerLen > 1 {
return "", fmt.Errorf("podman-machine images should only have 1 layer: %d found", layerLen)
}
// podman-machine-images should have an original file name
// stored in the annotations under org.opencontainers.image.title
// i.e. fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz
originalFileName, ok := v1MannyFest.Layers[0].Annotations["org.opencontainers.image.title"]
if !ok {
return "", fmt.Errorf("unable to determine original artifact name: missing required annotation 'org.opencontainers.image.title'")
}
logrus.Debugf("original artifact file name: %s", originalFileName)
return artifactDigest, err
}
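Simplified sketch of the selection criteria above (not part of the diff); instanceInfo is a stand-in for the manifest-list entry and only mimics the fields the real code reads (Platform.Architecture, Platform.OS, and the "disktype" annotation), while DiskArtifactOpts is the struct defined earlier in this commit:

	type instanceInfo struct {
		arch, os, diskType string
	}

	func matchesDiskArtifact(i instanceInfo, opts DiskArtifactOpts) bool {
		// an entry is the disk we want only when the disktype annotation is
		// present and arch, OS, and disk type all line up,
		// e.g. {arch: "x86_64", os: "linux", diskType: "qemu"}
		return i.diskType != "" &&
			i.arch == opts.arch &&
			i.os == opts.os &&
			i.diskType == opts.diskType
	}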

@@ -1,169 +0,0 @@
package ocipull
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v5/pkg/machine/compression"
"github.com/containers/podman/v5/pkg/machine/define"
"github.com/containers/podman/v5/utils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
type Versioned struct {
blob *types.BlobInfo
blobDirPath string
cacheDir string
ctx context.Context
imageName string
machineImageDir string
machineVersion *OSVersion
vmName string
vmType string
finalPath *define.VMFile
}
func NewVersioned(ctx context.Context, machineImageDir *define.VMFile, vmName string, vmType string, finalPath *define.VMFile) (*Versioned, error) {
imageCacheDir := filepath.Join(machineImageDir.GetPath(), "cache")
if err := os.MkdirAll(imageCacheDir, 0777); err != nil {
return nil, err
}
o := getVersion()
return &Versioned{ctx: ctx, cacheDir: imageCacheDir, machineImageDir: machineImageDir.GetPath(), machineVersion: o, vmName: vmName, vmType: vmType, finalPath: finalPath}, nil
}
func (d *Versioned) LocalBlob() *types.BlobInfo {
return d.blob
}
func (d *Versioned) DiskEndpoint() string {
return d.machineVersion.diskImage(d.vmType)
}
func (d *Versioned) versionedOCICacheDir() string {
return filepath.Join(d.cacheDir, d.machineVersion.majorMinor())
}
func (d *Versioned) identifyImageNameFromOCIDir() (string, error) {
imageManifest, err := ReadImageManifestFromOCIPath(d.ctx, d.versionedOCICacheDir())
if err != nil {
return "", err
}
if len(imageManifest.Layers) > 1 {
return "", fmt.Errorf("podman machine images can have only one layer: %d found", len(imageManifest.Layers))
}
path := filepath.Join(d.versionedOCICacheDir(), "blobs", "sha256", imageManifest.Layers[0].Digest.Hex())
return findTarComponent(path)
}
func (d *Versioned) pull(path string) error {
fmt.Printf("Pulling %s\n", d.DiskEndpoint())
logrus.Debugf("pulling %s to %s", d.DiskEndpoint(), path)
return Pull(d.ctx, d.DiskEndpoint(), path, PullOptions{})
}
func (d *Versioned) Pull() error {
var (
err error
isUpdatable bool
localBlob *types.BlobInfo
remoteDescriptor *v1.Descriptor
)
remoteDiskImage := d.machineVersion.diskImage(d.vmType)
logrus.Debugf("podman disk image name: %s", remoteDiskImage)
// is there a valid oci dir in our cache
hasCache := d.localOCIDirExists()
if hasCache {
logrus.Debug("checking remote registry")
remoteDescriptor, err = GetRemoteDescriptor(d.ctx, remoteDiskImage)
if err != nil {
return err
}
logrus.Debugf("working with local cache: %s", d.versionedOCICacheDir())
localBlob, err = GetLocalBlob(d.ctx, d.versionedOCICacheDir())
if err != nil {
return err
}
// determine if the local is same as remote
if remoteDescriptor.Digest.Hex() != localBlob.Digest.Hex() {
logrus.Debugf("new image is available: %s", remoteDescriptor.Digest.Hex())
isUpdatable = true
}
}
if !hasCache || isUpdatable {
if hasCache {
if err := utils.GuardedRemoveAll(d.versionedOCICacheDir()); err != nil {
return err
}
}
if err := d.pull(d.versionedOCICacheDir()); err != nil {
return err
}
}
imageName, err := d.identifyImageNameFromOCIDir()
if err != nil {
return err
}
logrus.Debugf("image name: %s", imageName)
d.imageName = imageName
if localBlob == nil {
localBlob, err = GetLocalBlob(d.ctx, d.versionedOCICacheDir())
if err != nil {
return err
}
}
d.blob = localBlob
d.blobDirPath = d.versionedOCICacheDir()
logrus.Debugf("local oci disk image blob: %s", d.localOCIDiskImageDir(localBlob))
return nil
}
func (d *Versioned) Unpack() (*define.VMFile, error) {
tbPath := localOCIDiskImageDir(d.blobDirPath, d.blob)
unpackedFile, err := unpackOCIDir(tbPath, d.machineImageDir)
if err != nil {
return nil, err
}
d.imageName = unpackedFile.GetPath()
return unpackedFile, nil
}
func (d *Versioned) Decompress(compressedFile *define.VMFile) error {
return compression.Decompress(compressedFile, d.finalPath.GetPath())
}
func (d *Versioned) localOCIDiskImageDir(localBlob *types.BlobInfo) string {
return filepath.Join(d.versionedOCICacheDir(), "blobs", "sha256", localBlob.Digest.Hex())
}
func (d *Versioned) localOCIDirExists() bool {
_, indexErr := os.Stat(filepath.Join(d.versionedOCICacheDir(), "index.json"))
return indexErr == nil
}
func (d *Versioned) Get() error {
if err := d.Pull(); err != nil {
return err
}
unpacked, err := d.Unpack()
if err != nil {
return err
}
defer func() {
logrus.Debugf("cleaning up %q", unpacked.GetPath())
if err := unpacked.Delete(); err != nil {
logrus.Errorf("unable to delete local compressed file %q:%v", unpacked.GetPath(), err)
}
}()
return d.Decompress(unpacked)
}

@@ -16,7 +16,7 @@ func GetDisk(userInputPath string, dirs *define.MachineDirs, imagePath *define.V
)
if userInputPath == "" {
mydisk, err = ocipull.NewVersioned(context.Background(), dirs.DataDir, name, vmType.String(), imagePath)
mydisk, err = ocipull.NewOCIArtifactPull(context.Background(), dirs, name, vmType, imagePath)
} else {
if strings.HasPrefix(userInputPath, "http") {
// TODO probably should use tempdir instead of datadir

@@ -135,9 +135,7 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.M
// "/path
// "docker://quay.io/something/someManifest
// TODO Ideally this should be handled in a better way ...
err = mp.GetDisk(opts.ImagePath, dirs, mc)
if err != nil {
if err := mp.GetDisk(opts.ImagePath, dirs, mc); err != nil {
return nil, err
}