Merge pull request #21536 from containers/machine-dev-5

Podman Machine 5 Refactor
openshift-merge-bot[bot]
2024-02-07 20:23:15 +00:00
committed by GitHub
104 changed files with 4356 additions and 5092 deletions


@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "main"
DEST_BRANCH: "machine-dev-5"
# Sane (default) value for GOPROXY and GOSUMDB.
GOPROXY: "https://proxy.golang.org,direct"
GOSUMDB: "sum.golang.org"
@ -363,10 +363,10 @@ alt_build_task:
matrix:
- env:
ALT_NAME: 'Build Each Commit'
- env:
# TODO: Replace with task using `winmake` to build
# binary and archive installation zip file.
ALT_NAME: 'Windows Cross'
#- env:
# # TODO: Replace with task using `winmake` to build
# # binary and archive installation zip file.
# ALT_NAME: 'Windows Cross'
- env:
ALT_NAME: 'Alt Arch. x86 Cross'
- env:
@ -387,58 +387,58 @@ alt_build_task:
always: *runner_stats
win_installer_task:
name: "Verify Win Installer Build"
alias: win_installer
only_if: # RHEL never releases podman windows installer binary
$CIRRUS_TAG == '' &&
$CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' &&
$CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel'
depends_on:
- alt_build
ec2_instance: &windows
image: "${WINDOWS_AMI}"
type: m5.large
region: us-east-1
platform: windows
env: &winenv
CIRRUS_WORKING_DIR: &wincwd "${LOCALAPPDATA}\\cirrus-ci-build"
CIRRUS_SHELL: powershell
PATH: "${PATH};C:\\ProgramData\\chocolatey\\bin"
DISTRO_NV: "windows"
PRIV_NAME: "rootless"
# Fake version, we are only testing the installer functions, so version doesn't matter
WIN_INST_VER: 9.9.9
# It's HIGHLY desirable to use the same binary throughout CI. Otherwise, if
# there's a toolchain or build-environment specific problem, it can be incredibly
# difficult (and non-obvious) to debug.
clone_script: &winclone |
$ErrorActionPreference = 'Stop'
$ProgressPreference = 'SilentlyContinue'
New-Item -ItemType Directory -Force -Path "$ENV:CIRRUS_WORKING_DIR"
Set-Location "$ENV:CIRRUS_WORKING_DIR"
$uri = "${ENV:ART_URL}/Windows Cross/repo/repo.tbz"
Write-Host "Downloading $uri"
For($i = 0;;) {
Try {
Invoke-WebRequest -UseBasicParsing -ErrorAction Stop -OutFile "repo.tbz2" `
-Uri "$uri"
Break
} Catch {
if (++$i -gt 6) {
throw $_.Exception
}
Write-Host "Download failed - retrying:" $_.Exception.Response.StatusCode
Start-Sleep -Seconds 10
}
}
arc unarchive repo.tbz2 .\
if ($LASTEXITCODE -ne 0) {
throw "Unarchive repo.tbz2 failed"
Exit 1
}
Get-ChildItem -Path .\repo
main_script: ".\\repo\\contrib\\cirrus\\win-installer-main.ps1"
#win_installer_task:
# name: "Verify Win Installer Build"
# alias: win_installer
# only_if: # RHEL never releases podman windows installer binary
# $CIRRUS_TAG == '' &&
# $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' &&
# $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel'
# depends_on:
# - alt_build
# ec2_instance: &windows
# image: "${WINDOWS_AMI}"
# type: m5.large
# region: us-east-1
# platform: windows
# env: &winenv
# CIRRUS_WORKING_DIR: &wincwd "${LOCALAPPDATA}\\cirrus-ci-build"
# CIRRUS_SHELL: powershell
# PATH: "${PATH};C:\\ProgramData\\chocolatey\\bin"
# DISTRO_NV: "windows"
# PRIV_NAME: "rootless"
# # Fake version, we are only testing the installer functions, so version doesn't matter
# WIN_INST_VER: 9.9.9
# # It's HIGHLY desirable to use the same binary throughout CI. Otherwise, if
# # there's a toolchain or build-environment specific problem, it can be incredibly
# # difficult (and non-obvious) to debug.
# clone_script: &winclone |
# $ErrorActionPreference = 'Stop'
# $ProgressPreference = 'SilentlyContinue'
# New-Item -ItemType Directory -Force -Path "$ENV:CIRRUS_WORKING_DIR"
# Set-Location "$ENV:CIRRUS_WORKING_DIR"
# $uri = "${ENV:ART_URL}/Windows Cross/repo/repo.tbz"
# Write-Host "Downloading $uri"
# For($i = 0;;) {
# Try {
# Invoke-WebRequest -UseBasicParsing -ErrorAction Stop -OutFile "repo.tbz2" `
# -Uri "$uri"
# Break
# } Catch {
# if (++$i -gt 6) {
# throw $_.Exception
# }
# Write-Host "Download failed - retrying:" $_.Exception.Response.StatusCode
# Start-Sleep -Seconds 10
# }
# }
# arc unarchive repo.tbz2 .\
# if ($LASTEXITCODE -ne 0) {
# throw "Unarchive repo.tbz2 failed"
# Exit 1
# }
# Get-ChildItem -Path .\repo
# main_script: ".\\repo\\contrib\\cirrus\\win-installer-main.ps1"
# Confirm building the remote client, natively on a Mac OS-X VM.
@ -487,37 +487,36 @@ osx_alt_build_task:
always:
task_cleanup_script: *mac_cleanup
# Build freebsd release natively on a FreeBSD VM.
freebsd_alt_build_task:
name: "FreeBSD Cross"
alias: freebsd_alt_build
# Only run on 'main' and PRs against 'main'
# Docs: ./contrib/cirrus/CIModes.md
only_if: |
$CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' &&
( $CIRRUS_BRANCH == 'main' || $CIRRUS_BASE_BRANCH == 'main' )
depends_on:
- build
env:
<<: *stdenvars
# Functional FreeBSD builds must be built natively since they depend on CGO
DISTRO_NV: freebsd-13
VM_IMAGE_NAME: notyet
CTR_FQIN: notyet
CIRRUS_SHELL: "/bin/sh"
TEST_FLAVOR: "altbuild"
ALT_NAME: 'FreeBSD Cross'
freebsd_instance:
image_family: freebsd-13-2
setup_script:
- pkg install -y gpgme bash go-md2man gmake gsed gnugrep go pkgconf
build_amd64_script:
- gmake podman-release
# This task cannot make use of the shared repo.tbz artifact and must
# produce a new repo.tbz artifact for consumption by 'artifacts' task.
repo_prep_script: *repo_prep
repo_artifacts: *repo_artifacts
#freebsd_alt_build_task:
# name: "FreeBSD Cross"
# alias: freebsd_alt_build
# # Only run on 'main' and PRs against 'main'
# # Docs: ./contrib/cirrus/CIModes.md
# only_if: |
# $CIRRUS_CHANGE_TITLE !=~ '.*CI:MACHINE.*' &&
# ( $CIRRUS_BRANCH == 'main' || $CIRRUS_BASE_BRANCH == 'main' )
# depends_on:
# - build
# env:
# <<: *stdenvars
# # Functional FreeBSD builds must be built natively since they depend on CGO
# DISTRO_NV: freebsd-13
# VM_IMAGE_NAME: notyet
# CTR_FQIN: notyet
# CIRRUS_SHELL: "/bin/sh"
# TEST_FLAVOR: "altbuild"
# ALT_NAME: 'FreeBSD Cross'
# freebsd_instance:
# image_family: freebsd-13-2
# setup_script:
# - pkg install -y gpgme bash go-md2man gmake gsed gnugrep go pkgconf
# build_amd64_script:
# - gmake podman-release
# # This task cannot make use of the shared repo.tbz artifact and must
# # produce a new repo.tbz artifact for consumption by 'artifacts' task.
# repo_prep_script: *repo_prep
# repo_artifacts: *repo_artifacts
# Verify podman is compatible with the docker python-module.
@ -775,77 +774,77 @@ podman_machine_aarch64_task:
always: *int_logs_artifacts
podman_machine_windows_task:
name: *std_name_fmt
alias: podman_machine_windows
# Only run for non-docs/copr PRs and non-release branch builds
# and never for tags. Docs: ./contrib/cirrus/CIModes.md
only_if: *machine_cron_not_tag_build_docs
depends_on:
- alt_build
- build
- win_installer
- local_integration_test
- remote_integration_test
- container_integration_test
- rootless_integration_test
ec2_instance:
<<: *windows
type: m5zn.metal
platform: windows
env: *winenv
matrix:
- env:
TEST_FLAVOR: "machine-wsl"
- env:
TEST_FLAVOR: "machine-hyperv"
clone_script: *winclone
main_script: ".\\repo\\contrib\\cirrus\\win-podman-machine-main.ps1"
#podman_machine_windows_task:
# name: *std_name_fmt
# alias: podman_machine_windows
# # Only run for non-docs/copr PRs and non-release branch builds
# # and never for tags. Docs: ./contrib/cirrus/CIModes.md
# only_if: *not_tag_branch_build_docs
# depends_on:
# - alt_build
# - build
# - win_installer
# - local_integration_test
# - remote_integration_test
# - container_integration_test
# - rootless_integration_test
# ec2_instance:
# <<: *windows
# type: m5zn.metal
# platform: windows
# env: *winenv
# matrix:
# - env:
# TEST_FLAVOR: "machine-wsl"
# - env:
# TEST_FLAVOR: "machine-hyperv"
# clone_script: *winclone
# main_script: ".\\repo\\contrib\\cirrus\\win-podman-machine-main.ps1"
podman_machine_mac_task:
name: *std_name_fmt
alias: podman_machine_mac
only_if: *machine_cron_not_tag_build_docs
depends_on:
- osx_alt_build
- local_integration_test
- remote_integration_test
- container_integration_test
- rootless_integration_test
persistent_worker: *mac_pw
env:
<<: *mac_env
# Consumed by podman-machine ginkgo tests
CONTAINERS_MACHINE_PROVIDER: "applehv"
# TODO: Should not require a special image, for now it does.
# Simply remove the line below when a mac image is GA.
MACHINE_IMAGE: "https://fedorapeople.org/groups/podman/testing/applehv/arm64/fedora-coreos-38.20230925.dev.0-applehv.aarch64.raw.gz"
# Values necessary to populate std_name_fmt alias
TEST_FLAVOR: "machine-mac"
DISTRO_NV: "darwin"
PRIV_NAME: "rootless" # intended use-case
clone_script: # artifacts from osx_alt_build_task
- mkdir -p $CIRRUS_WORKING_DIR
- cd $CIRRUS_WORKING_DIR
- $ARTCURL/OSX%20Cross/repo/repo.tbz
- tar xjf repo.tbz
# This host is/was shared with potentially many other CI tasks.
# The previous task may have been canceled or aborted.
prep_script: *mac_cleanup
setup_script: "contrib/cirrus/mac_setup.sh"
env_script: "contrib/cirrus/mac_env.sh"
# TODO: Timeout bumped b/c initial image download (~5min) and VM
# resize (~2min) causes test-timeout (90s default). Should
# tests deal with this internally?
smoke_test_script:
- MACHINE_TEST_TIMEOUT=500 make localmachine FOCUS_FILE="basic_test.go"
test_script:
- make localmachine
# This host is/was shared with potentially many other CI tasks.
# Ensure nothing is left running while waiting for the next task.
always:
task_cleanup_script: *mac_cleanup
#podman_machine_mac_task:
# name: *std_name_fmt
# alias: podman_machine_mac
# only_if: *not_tag_branch_build_docs
# depends_on:
# - osx_alt_build
# - local_integration_test
# - remote_integration_test
# - container_integration_test
# - rootless_integration_test
# persistent_worker: *mac_pw
# env:
# <<: *mac_env
# # Consumed by podman-machine ginkgo tests
# CONTAINERS_MACHINE_PROVIDER: "applehv"
# # TODO: Should not require a special image, for now it does.
# # Simply remove the line below when a mac image is GA.
# # MACHINE_IMAGE: "https://fedorapeople.org/groups/podman/testing/applehv/arm64/fedora-coreos-38.20230925.dev.0-applehv.aarch64.raw.gz"
# # Values necessary to populate std_name_fmt alias
# TEST_FLAVOR: "machine-mac"
# DISTRO_NV: "darwin"
# PRIV_NAME: "rootless" # intended use-case
# clone_script: # artifacts from osx_alt_build_task
# - mkdir -p $CIRRUS_WORKING_DIR
# - cd $CIRRUS_WORKING_DIR
# - $ARTCURL/OSX%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# # This host is/was shared with potentially many other CI tasks.
# # The previous task may have been canceled or aborted.
# prep_script: *mac_cleanup
# setup_script: "contrib/cirrus/mac_setup.sh"
# env_script: "contrib/cirrus/mac_env.sh"
# # TODO: Timeout bumped b/c initial image download (~5min) and VM
# # resize (~2min) causes test-timeout (90s default). Should
# # tests deal with this internally?
# smoke_test_script:
# - MACHINE_TEST_TIMEOUT=500 make localmachine FOCUS_FILE="basic_test.go"
# test_script:
# - make localmachine
# # This host is/was shared with potentially many other CI tasks.
# # Ensure nothing is left running while waiting for the next task.
# always:
# task_cleanup_script: *mac_cleanup
# Always run subsequent to integration tests. While parallelism is lost
# with runtime, debugging system-test failures can be more challenging
@ -1051,8 +1050,8 @@ success_task:
- swagger
- alt_build
- osx_alt_build
- freebsd_alt_build
- win_installer
#- freebsd_alt_build
#- win_installer
- docker-py_test
- unit_test
- apiv2_test
@ -1063,7 +1062,7 @@ success_task:
- rootless_integration_test
- podman_machine
- podman_machine_aarch64
- podman_machine_windows
#- podman_machine_windows
# TODO: Issue #20853; Tests mostly fail then timeout after an hour.
# - podman_machine_mac
- local_system_test
@ -1096,104 +1095,104 @@ success_task:
# WARNING: Most of the artifacts captured here also have their
# permalinks present in the `DOWNLOADS.md` file. Any changes made
# here should probably be reflected in that document.
artifacts_task:
name: "Artifacts"
alias: artifacts
# Docs: ./contrib/cirrus/CIModes.md
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' &&
$CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel'
depends_on:
- success
# This task is a secondary/convenience for downstream consumers, don't
# block development progress if there is a failure in a PR, only break
# when running on branches or tags.
allow_failures: $CIRRUS_PR != ''
container: *smallcontainer
env:
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
TEST_ENVIRON: container
# In order to keep the download URL and Cirrus-CI artifact.zip contents
# simple, nothing should exist in $CIRRUS_WORKING_DIR except for artifacts.
clone_script: *noop
fedora_binaries_script:
- mkdir -p /tmp/fed
- cd /tmp/fed
- $ARTCURL/Build%20for%20${FEDORA_NAME}/repo/repo.tbz
- tar xjf repo.tbz
- cp ./bin/* $CIRRUS_WORKING_DIR/
alt_binaries_intel_script:
- mkdir -p /tmp/alt
- cd /tmp/alt
- $ARTCURL/Alt%20Arch.%20x86%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./*.tar.gz $CIRRUS_WORKING_DIR/
alt_binaries_arm_script:
- mkdir -p /tmp/alt
- cd /tmp/alt
- $ARTCURL/Alt%20Arch.%20ARM%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./*.tar.gz $CIRRUS_WORKING_DIR/
alt_binaries_mips_script:
- mkdir -p /tmp/alt
- cd /tmp/alt
- $ARTCURL/Alt%20Arch.%20MIPS%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./*.tar.gz $CIRRUS_WORKING_DIR/
alt_binaries_mips64_script:
- mkdir -p /tmp/alt
- cd /tmp/alt
- $ARTCURL/Alt%20Arch.%20MIPS64%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./*.tar.gz $CIRRUS_WORKING_DIR/
alt_binaries_other_script:
- mkdir -p /tmp/alt
- cd /tmp/alt
- $ARTCURL/Alt%20Arch.%20Other%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./*.tar.gz $CIRRUS_WORKING_DIR/
win_binaries_script:
- mkdir -p /tmp/win
- cd /tmp/win
- $ARTCURL/Windows%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./podman-remote*.zip $CIRRUS_WORKING_DIR/
osx_binaries_script:
- mkdir -p /tmp/osx
- cd /tmp/osx
- $ARTCURL/OSX%20Cross/repo/repo.tbz
- tar xjf repo.tbz
- mv ./podman-remote-release-darwin_*.zip $CIRRUS_WORKING_DIR/
- mv ./contrib/pkginstaller/out/podman-installer-macos-*.pkg $CIRRUS_WORKING_DIR/
always:
contents_script: ls -la $CIRRUS_WORKING_DIR
# Produce downloadable files and an automatic zip-file accessible
# by a consistent URL, based on contents of $CIRRUS_WORKING_DIR
# Ref: https://cirrus-ci.org/guide/writing-tasks/#latest-build-artifacts
binary_artifacts:
path: ./*
type: application/octet-stream
#artifacts_task:
# name: "Artifacts"
# alias: artifacts
# # Docs: ./contrib/cirrus/CIModes.md
# only_if: >-
# $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
# $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' &&
# $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel'
# depends_on:
# - success
# # This task is a secondary/convenience for downstream consumers, don't
# # block development progress if there is a failure in a PR, only break
# # when running on branches or tags.
# allow_failures: $CIRRUS_PR != ''
# container: *smallcontainer
# env:
# CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
# TEST_ENVIRON: container
# # In order to keep the download URL and Cirrus-CI artifact.zip contents
# # simple, nothing should exist in $CIRRUS_WORKING_DIR except for artifacts.
# clone_script: *noop
# fedora_binaries_script:
# - mkdir -p /tmp/fed
# - cd /tmp/fed
# - $ARTCURL/Build%20for%20${FEDORA_NAME}/repo/repo.tbz
# - tar xjf repo.tbz
# - cp ./bin/* $CIRRUS_WORKING_DIR/
# alt_binaries_intel_script:
# - mkdir -p /tmp/alt
# - cd /tmp/alt
# - $ARTCURL/Alt%20Arch.%20x86%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./*.tar.gz $CIRRUS_WORKING_DIR/
# alt_binaries_arm_script:
# - mkdir -p /tmp/alt
# - cd /tmp/alt
# - $ARTCURL/Alt%20Arch.%20ARM%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./*.tar.gz $CIRRUS_WORKING_DIR/
# alt_binaries_mips_script:
# - mkdir -p /tmp/alt
# - cd /tmp/alt
# - $ARTCURL/Alt%20Arch.%20MIPS%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./*.tar.gz $CIRRUS_WORKING_DIR/
# alt_binaries_mips64_script:
# - mkdir -p /tmp/alt
# - cd /tmp/alt
# - $ARTCURL/Alt%20Arch.%20MIPS64%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./*.tar.gz $CIRRUS_WORKING_DIR/
# alt_binaries_other_script:
# - mkdir -p /tmp/alt
# - cd /tmp/alt
# - $ARTCURL/Alt%20Arch.%20Other%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./*.tar.gz $CIRRUS_WORKING_DIR/
# win_binaries_script:
# - mkdir -p /tmp/win
# - cd /tmp/win
# - $ARTCURL/Windows%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./podman-remote*.zip $CIRRUS_WORKING_DIR/
# osx_binaries_script:
# - mkdir -p /tmp/osx
# - cd /tmp/osx
# - $ARTCURL/OSX%20Cross/repo/repo.tbz
# - tar xjf repo.tbz
# - mv ./podman-remote-release-darwin_*.zip $CIRRUS_WORKING_DIR/
# - mv ./contrib/pkginstaller/out/podman-installer-macos-*.pkg $CIRRUS_WORKING_DIR/
# always:
# contents_script: ls -la $CIRRUS_WORKING_DIR
# # Produce downloadable files and an automatic zip-file accessible
# # by a consistent URL, based on contents of $CIRRUS_WORKING_DIR
# # Ref: https://cirrus-ci.org/guide/writing-tasks/#latest-build-artifacts
# binary_artifacts:
# path: ./*
# type: application/octet-stream
# When a new tag is pushed, confirm that the code and commits
# meet criteria for an official release.
release_task:
name: "Verify Release"
alias: release
# This should _only_ run for new tags
# Docs: ./contrib/cirrus/CIModes.md
only_if: $CIRRUS_TAG != ''
depends_on:
- build
- success
gce_instance: *standardvm
env:
<<: *stdenvars
TEST_FLAVOR: release
clone_script: *get_gosrc
setup_script: *setup
main_script: *main
#release_task:
# name: "Verify Release"
# alias: release
# # This should _only_ run for new tags
# # Docs: ./contrib/cirrus/CIModes.md
# only_if: $CIRRUS_TAG != ''
# depends_on:
# - build
# - success
# gce_instance: *standardvm
# env:
# <<: *stdenvars
# TEST_FLAVOR: release
# clone_script: *get_gosrc
# setup_script: *setup
# main_script: *main
# When preparing to release a new version, this task may be manually
@ -1202,22 +1201,22 @@ release_task:
#
# Note: This cannot use a YAML alias on 'release_task' as of this
# comment; it is incompatible with 'trigger_type: manual'
release_test_task:
name: "Optional Release Test"
alias: release_test
# Release-PRs always include "release" or "Bump" in the title
# Docs: ./contrib/cirrus/CIModes.md
only_if: $CIRRUS_CHANGE_TITLE =~ '.*((release)|(bump)).*'
# Allow running manually only as part of release-related builds
# see RELEASE_PROCESS.md
trigger_type: manual
depends_on:
- build
- success
gce_instance: *standardvm
env:
<<: *stdenvars
TEST_FLAVOR: release
clone_script: *get_gosrc
setup_script: *setup
main_script: *main
#release_test_task:
# name: "Optional Release Test"
# alias: release_test
# # Release-PRs always include "release" or "Bump" in the title
# # Docs: ./contrib/cirrus/CIModes.md
# only_if: $CIRRUS_CHANGE_TITLE =~ '.*((release)|(bump)).*'
# # Allow running manually only as part of release-related builds
# # see RELEASE_PROCESS.md
# trigger_type: manual
# depends_on:
# - build
# - success
# gce_instance: *standardvm
# env:
# <<: *stdenvars
# TEST_FLAVOR: release
# clone_script: *get_gosrc
# setup_script: *setup
# main_script: *main


@ -19,6 +19,7 @@ import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/provider"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@ -149,7 +150,12 @@ func composeDockerHost() (string, error) {
if err != nil {
return "", fmt.Errorf("getting machine provider: %w", err)
}
machineList, err := machineProvider.List(machine.ListOptions{})
dirs, err := machine.GetMachineDirs(machineProvider.VMType())
if err != nil {
return "", err
}
machineList, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
return "", fmt.Errorf("listing machines: %w", err)
}
@ -162,31 +168,32 @@ func composeDockerHost() (string, error) {
return "", fmt.Errorf("parsing connection port: %w", err)
}
for _, item := range machineList {
if connectionPort != item.Port {
if connectionPort != item.SSH.Port {
continue
}
vm, err := machineProvider.LoadVMByName(item.Name)
state, err := machineProvider.State(item, false)
if err != nil {
return "", fmt.Errorf("loading machine: %w", err)
return "", err
}
info, err := vm.Inspect()
if err != nil {
return "", fmt.Errorf("inspecting machine: %w", err)
if state != define.Running {
return "", fmt.Errorf("machine %s is not running but in state %s", item.Name, state)
}
if info.State != define.Running {
return "", fmt.Errorf("machine %s is not running but in state %s", item.Name, info.State)
}
if machineProvider.VMType() == define.WSLVirt || machineProvider.VMType() == define.HyperVVirt {
if info.ConnectionInfo.PodmanPipe == nil {
return "", errors.New("pipe of machine is not set")
}
return strings.Replace(info.ConnectionInfo.PodmanPipe.Path, `\\.\pipe\`, "npipe:////./pipe/", 1), nil
}
if info.ConnectionInfo.PodmanSocket == nil {
return "", errors.New("socket of machine is not set")
}
return "unix://" + info.ConnectionInfo.PodmanSocket.Path, nil
// TODO This needs to be wired back in when all providers are complete
// TODO Need someone to plumb in the connection information below
// if machineProvider.VMType() == define.WSLVirt || machineProvider.VMType() == define.HyperVVirt {
// if info.ConnectionInfo.PodmanPipe == nil {
// return "", errors.New("pipe of machine is not set")
// }
// return strings.Replace(info.ConnectionInfo.PodmanPipe.Path, `\\.\pipe\`, "npipe:////./pipe/", 1), nil
// }
// if info.ConnectionInfo.PodmanSocket == nil {
// return "", errors.New("socket of machine is not set")
// }
// return "unix://" + info.ConnectionInfo.PodmanSocket.Path, nil
return "", nil
}
return "", fmt.Errorf("could not find a matching machine for connection %q", connection.URI)


@ -15,8 +15,10 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/machine"
machineDefine "github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
"gopkg.in/yaml.v2"
)
var infoDescription = `Display information pertaining to the machine host.`
@ -89,7 +91,6 @@ func info(cmd *cobra.Command, args []string) error {
}
fmt.Println(string(b))
}
return nil
}
@ -99,13 +100,16 @@ func hostInfo() (*entities.MachineHostInfo, error) {
host.Arch = runtime.GOARCH
host.OS = runtime.GOOS
var listOpts machine.ListOptions
listResponse, err := provider.List(listOpts)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return nil, err
}
mcs, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
return nil, fmt.Errorf("failed to get machines %w", err)
}
host.NumberOfMachines = len(listResponse)
host.NumberOfMachines = len(mcs)
defaultCon := ""
con, err := registry.PodmanConfig().ContainersConfDefaultsRO.GetConnection("", true)
@ -116,13 +120,18 @@ func hostInfo() (*entities.MachineHostInfo, error) {
// Default state of machine is stopped
host.MachineState = "Stopped"
for _, vm := range listResponse {
for _, vm := range mcs {
// Set default machine if found
if vm.Name == defaultCon {
host.DefaultMachine = vm.Name
}
// If machine is running or starting, it is automatically the current machine
if vm.Running {
state, err := provider.State(vm, false)
if err != nil {
return nil, err
}
if state == machineDefine.Running {
host.CurrentMachine = vm.Name
host.MachineState = "Running"
} else if vm.Starting {
@ -142,17 +151,8 @@ func hostInfo() (*entities.MachineHostInfo, error) {
host.VMType = provider.VMType().String()
dataDir, err := machine.GetDataDir(provider.VMType())
if err != nil {
return nil, fmt.Errorf("failed to get machine image dir")
}
host.MachineImageDir = dataDir
confDir, err := machine.GetConfDir(provider.VMType())
if err != nil {
return nil, fmt.Errorf("failed to get machine config dir %w", err)
}
host.MachineConfigDir = confDir
host.MachineImageDir = dirs.DataDir.GetPath()
host.MachineConfigDir = dirs.ConfigDir.GetPath()
eventsDir, err := eventSockDir()
if err != nil {


@ -11,6 +11,8 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
)
@ -26,7 +28,7 @@ var (
ValidArgsFunction: completion.AutocompleteNone,
}
initOpts = machine.InitOptions{}
initOpts = define.InitOptions{}
initOptionalFlags = InitOptionalFlags{}
defaultMachineName = machine.DefaultMachineName
now bool
@ -99,7 +101,7 @@ func init() {
_ = initCmd.RegisterFlagCompletionFunc(UsernameFlagName, completion.AutocompleteDefault)
ImagePathFlagName := "image-path"
flags.StringVar(&initOpts.ImagePath, ImagePathFlagName, cfg.ContainersConfDefaultsRO.Machine.Image, "Path to bootable image")
flags.StringVar(&initOpts.ImagePath, ImagePathFlagName, "", "Path to bootable image")
_ = initCmd.RegisterFlagCompletionFunc(ImagePathFlagName, completion.AutocompleteDefault)
VolumeFlagName := "volume"
@ -128,10 +130,6 @@ func init() {
}
func initMachine(cmd *cobra.Command, args []string) error {
var (
err error
vm machine.VM
)
initOpts.Name = defaultMachineName
if len(args) > 0 {
if len(args[0]) > maxMachineNameSize {
@ -145,8 +143,15 @@ func initMachine(cmd *cobra.Command, args []string) error {
return fmt.Errorf("cannot use %q for a machine name", initOpts.Name)
}
if _, err := provider.LoadVMByName(initOpts.Name); err == nil {
return fmt.Errorf("%s: %w", initOpts.Name, machine.ErrVMAlreadyExists)
// Check if machine already exists
_, exists, err := shim.VMExists(initOpts.Name, []vmconfigs.VMProvider{provider})
if err != nil {
return err
}
// machine exists, return error
if exists {
return fmt.Errorf("%s: %w", initOpts.Name, define.ErrVMAlreadyExists)
}
// check if a system connection already exists
@ -173,20 +178,28 @@ func initMachine(cmd *cobra.Command, args []string) error {
initOpts.UserModeNetworking = &initOptionalFlags.UserModeNetworking
}
vm, err = provider.NewMachine(initOpts)
// TODO need to work this back in
// if finished, err := vm.Init(initOpts); err != nil || !finished {
// // Finished = true, err = nil - Success! Log a message with further instructions
// // Finished = false, err = nil - The installation is partially complete and podman should
// // exit gracefully with no error and no success message.
// // Examples:
// // - a user has chosen to perform their own reboot
// // - reexec for limited admin operations, returning to parent
// // Finished = *, err != nil - Exit with an error message
// return err
// }
mc, err := shim.Init(initOpts, provider)
if err != nil {
return err
}
if finished, err := vm.Init(initOpts); err != nil || !finished {
// Finished = true, err = nil - Success! Log a message with further instructions
// Finished = false, err = nil - The installation is partially complete and podman should
// exit gracefully with no error and no success message.
// Examples:
// - a user has chosen to perform their own reboot
// - reexec for limited admin operations, returning to parent
// Finished = *, err != nil - Exit with an error message
// TODO callback needed for the configuration file
if err := mc.Write(); err != nil {
return err
}
newMachineEvent(events.Init, events.Event{Name: initOpts.Name})
fmt.Println("Machine init complete")


@ -10,6 +10,7 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
)
@ -46,23 +47,54 @@ func inspect(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
if len(args) < 1 {
args = append(args, defaultMachineName)
}
vms := make([]machine.InspectInfo, 0, len(args))
for _, vmName := range args {
vm, err := provider.LoadVMByName(vmName)
vms := make([]machine.InspectInfo, 0, len(args))
for _, name := range args {
mc, err := vmconfigs.LoadMachineByName(name, dirs)
if err != nil {
errs = append(errs, err)
continue
}
ii, err := vm.Inspect()
state, err := provider.State(mc, false)
if err != nil {
errs = append(errs, err)
continue
return err
}
vms = append(vms, *ii)
ignFile, err := mc.IgnitionFile()
if err != nil {
return err
}
ii := machine.InspectInfo{
// TODO I don't think this is useful
ConfigPath: *dirs.ConfigDir,
// TODO Fill this out
ConnectionInfo: machine.ConnectionConfig{},
Created: mc.Created,
// TODO This is no longer applicable; we don't care about the provenance
// of the image
Image: machine.ImageConfig{
IgnitionFile: *ignFile,
ImagePath: *mc.ImagePath,
},
LastUp: mc.LastUp,
Name: mc.Name,
Resources: mc.Resources,
SSHConfig: mc.SSH,
State: state,
UserModeNetworking: false,
// TODO I think this should be the HostUser
Rootful: mc.HostUser.Rootful,
}
vms = append(vms, ii)
}
switch {


@ -16,6 +16,8 @@ import (
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/docker/go-units"
"github.com/spf13/cobra"
)
@ -59,14 +61,13 @@ func init() {
func list(cmd *cobra.Command, args []string) error {
var (
opts machine.ListOptions
listResponse []*machine.ListResponse
err error
opts machine.ListOptions
err error
)
listResponse, err = provider.List(opts)
listResponse, err := shim.List([]vmconfigs.VMProvider{provider}, opts)
if err != nil {
return fmt.Errorf("listing vms: %w", err)
return err
}
// Sort by last run


@ -17,6 +17,7 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
provider2 "github.com/containers/podman/v4/pkg/machine/provider"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/pkg/util"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -39,8 +40,9 @@ var (
RunE: validate.SubCommandExists,
}
)
var (
provider machine.VirtProvider
provider vmconfigs.VMProvider
)
func init() {
@ -80,7 +82,11 @@ func getMachines(toComplete string) ([]string, cobra.ShellCompDirective) {
if err != nil {
return nil, cobra.ShellCompDirectiveNoFileComp
}
machines, err := provider.List(machine.ListOptions{})
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return nil, cobra.ShellCompDirectiveNoFileComp
}
machines, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
cobra.CompErrorln(err.Error())
return nil, cobra.ShellCompDirectiveNoFileComp


@ -8,6 +8,7 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
"github.com/containers/podman/v4/pkg/machine/os"
provider2 "github.com/containers/podman/v4/pkg/machine/provider"
"github.com/spf13/cobra"
)
@ -47,7 +48,12 @@ func apply(cmd *cobra.Command, args []string) error {
CLIArgs: args,
Restart: restart,
}
osManager, err := NewOSManager(managerOpts)
provider, err := provider2.Get()
if err != nil {
return err
}
osManager, err := NewOSManager(managerOpts, provider)
if err != nil {
return err
}


@ -12,6 +12,7 @@ import (
pkgMachine "github.com/containers/podman/v4/pkg/machine"
pkgOS "github.com/containers/podman/v4/pkg/machine/os"
"github.com/containers/podman/v4/pkg/machine/provider"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)
type ManagerOpts struct {
@ -21,13 +22,13 @@ type ManagerOpts struct {
}
// NewOSManager creates a new OSManager depending on the mode of the call
func NewOSManager(opts ManagerOpts) (pkgOS.Manager, error) {
func NewOSManager(opts ManagerOpts, p vmconfigs.VMProvider) (pkgOS.Manager, error) {
// If a VM name is specified, then we know that we are not inside a
// Podman VM, but rather outside of it.
if machineconfig.IsPodmanMachine() && opts.VMName == "" {
return guestOSManager()
}
return machineOSManager(opts)
return machineOSManager(opts, p)
}
// guestOSManager returns an OSmanager for inside-VM operations
@ -42,7 +43,7 @@ func guestOSManager() (pkgOS.Manager, error) {
}
// machineOSManager returns an os manager that manages outside the VM.
func machineOSManager(opts ManagerOpts) (pkgOS.Manager, error) {
func machineOSManager(opts ManagerOpts, _ vmconfigs.VMProvider) (pkgOS.Manager, error) {
vmName := opts.VMName
if opts.VMName == "" {
vmName = pkgMachine.DefaultMachineName
@ -51,15 +52,20 @@ func machineOSManager(opts ManagerOpts) (pkgOS.Manager, error) {
if err != nil {
return nil, err
}
vm, err := p.LoadVMByName(vmName)
dirs, err := pkgMachine.GetMachineDirs(p.VMType())
if err != nil {
return nil, err
}
mc, err := vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return nil, err
}
return &pkgOS.MachineOS{
VM: vm,
Args: opts.CLIArgs,
VMName: vmName,
Restart: opts.Restart,
VM: mc,
Provider: p,
Args: opts.CLIArgs,
VMName: vmName,
Restart: opts.Restart,
}, nil
}


@ -11,6 +11,10 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -51,25 +55,56 @@ func init() {
func rm(_ *cobra.Command, args []string) error {
var (
err error
vm machine.VM
)
vmName := defaultMachineName
if len(args) > 0 && len(args[0]) > 0 {
vmName = args[0]
}
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return err
}
confirmationMessage, remove, err := vm.Remove(vmName, destroyOptions)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
mc, err := vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return err
}
state, err := provider.State(mc, false)
if err != nil {
return err
}
if state == define.Running {
if !destroyOptions.Force {
return &define.ErrVMRunningCannotDestroyed{Name: vmName}
}
if err := shim.Stop(mc, provider, dirs, true); err != nil {
return err
}
}
rmFiles, genericRm, err := mc.Remove(destroyOptions.SaveIgnition, destroyOptions.SaveImage)
if err != nil {
return err
}
providerFiles, providerRm, err := provider.Remove(mc)
if err != nil {
return err
}
// Add provider specific files to the list
rmFiles = append(rmFiles, providerFiles...)
// Important!
// Nothing can be removed at this point. The user can still opt out below
//
if !destroyOptions.Force {
// Warn user
fmt.Println(confirmationMessage)
confirmationMessage(rmFiles)
reader := bufio.NewReader(os.Stdin)
fmt.Print("Are you sure you want to continue? [y/N] ")
answer, err := reader.ReadString('\n')
@ -80,10 +115,27 @@ func rm(_ *cobra.Command, args []string) error {
return nil
}
}
err = remove()
if err != nil {
return err
//
// All actual removal of files and vms should occur after this
//
// TODO Should this be a hard error?
if err := providerRm(); err != nil {
logrus.Errorf("failed to remove virtual machine from provider for %q", vmName)
}
// TODO Should this be a hard error?
if err := genericRm(); err != nil {
logrus.Error("failed to remove machines files")
}
newMachineEvent(events.Remove, events.Event{Name: vmName})
return nil
}
func confirmationMessage(files []string) {
fmt.Printf("The following files will be deleted:\n\n\n")
for _, msg := range files {
fmt.Println(msg)
}
}
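The rm flow above deliberately splits removal into two phases: mc.Remove and provider.Remove only collect file lists and return closures, and nothing is deleted until after the confirmation prompt. A minimal sketch of that pattern, using a hypothetical collectRemoval helper:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// collectRemoval is a hypothetical stand-in for mc.Remove/provider.Remove: it
// only gathers paths and returns a closure that performs the deletion later.
func collectRemoval(paths ...string) ([]string, func() error) {
	return paths, func() error {
		for _, p := range paths {
			if err := os.RemoveAll(p); err != nil {
				return err
			}
		}
		return nil
	}
}

func main() {
	files, doRemove := collectRemoval("/tmp/example-machine.json", "/tmp/example-machine-api.sock")
	fmt.Printf("The following files will be deleted:\n\n\n")
	for _, f := range files {
		fmt.Println(f)
	}
	fmt.Print("Are you sure you want to continue? [y/N] ")
	answer, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	if strings.ToLower(strings.TrimSpace(answer)) != "y" {
		return // nothing has been deleted yet, so opting out is safe
	}
	// all actual removal happens only after confirmation
	if err := doRemove(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}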


@ -4,11 +4,12 @@ package machine
import (
"fmt"
"os"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/strongunits"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
)
@ -88,8 +89,10 @@ func init() {
func setMachine(cmd *cobra.Command, args []string) error {
var (
vm machine.VM
err error
err error
newCPUs, newMemory *uint64
newDiskSize *strongunits.GiB
newRootful *bool
)
vmName := defaultMachineName
@ -97,34 +100,50 @@ func setMachine(cmd *cobra.Command, args []string) error {
vmName = args[0]
}
vm, err = provider.LoadVMByName(vmName)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
mc, err := vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return err
}
if cmd.Flags().Changed("rootful") {
setOpts.Rootful = &setFlags.Rootful
newRootful = &setFlags.Rootful
}
if cmd.Flags().Changed("cpus") {
setOpts.CPUs = &setFlags.CPUs
mc.Resources.CPUs = setFlags.CPUs
newCPUs = &mc.Resources.CPUs
}
if cmd.Flags().Changed("memory") {
setOpts.Memory = &setFlags.Memory
mc.Resources.Memory = setFlags.Memory
newMemory = &mc.Resources.Memory
}
if cmd.Flags().Changed("disk-size") {
setOpts.DiskSize = &setFlags.DiskSize
if setFlags.DiskSize <= mc.Resources.DiskSize {
return fmt.Errorf("new disk size must be larger than %d GB", mc.Resources.DiskSize)
}
mc.Resources.DiskSize = setFlags.DiskSize
newDiskSizeGB := strongunits.GiB(setFlags.DiskSize)
newDiskSize = &newDiskSizeGB
}
if cmd.Flags().Changed("user-mode-networking") {
// TODO This needs help
setOpts.UserModeNetworking = &setFlags.UserModeNetworking
}
if cmd.Flags().Changed("usb") {
// TODO This needs help
setOpts.USBs = &setFlags.USBs
}
setErrs, lasterr := vm.Set(vmName, setOpts)
for _, err := range setErrs {
fmt.Fprintf(os.Stderr, "%v\n", err)
// At this point, we have the known changed information, etc
// Walk through changes to the providers if they need them
if err := provider.SetProviderAttrs(mc, newCPUs, newMemory, newDiskSize, newRootful); err != nil {
return err
}
return lasterr
// Update the configuration file last if everything earlier worked
return mc.Write()
}


@ -6,10 +6,13 @@ import (
"fmt"
"net/url"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/spf13/cobra"
)
@ -42,22 +45,35 @@ func init() {
_ = sshCmd.RegisterFlagCompletionFunc(usernameFlagName, completion.AutocompleteNone)
}
// TODO Remember that this changed upstream and needs to be updated as such!
func ssh(cmd *cobra.Command, args []string) error {
var (
err error
mc *vmconfigs.MachineConfig
validVM bool
vm machine.VM
)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
// Set the VM to default
vmName := defaultMachineName
// If len is greater than 0, it means we may have been
// provided the VM name. If so, we check. The VM name,
// if provided, must be in args[0].
if len(args) > 0 {
// Ignore the error, See https://github.com/containers/podman/issues/21183#issuecomment-1879713572
validVM, _ = provider.IsValidVMName(args[0])
// note: previous incantations of this looked a machine up by a specific name
// and errors were ignored. this error is not ignored because
// it implies podman cannot read its machine files, which is bad
machines, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
return err
}
mc, validVM = machines[args[0]]
if validVM {
vmName = args[0]
} else {
@ -75,9 +91,12 @@ func ssh(cmd *cobra.Command, args []string) error {
}
}
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return fmt.Errorf("vm %s not found: %w", vmName, err)
// If the machine config was not loaded earlier, we load it now
if mc == nil {
mc, err = vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return fmt.Errorf("vm %s not found: %w", vmName, err)
}
}
if !validVM && sshOpts.Username == "" {
@ -87,7 +106,20 @@ func ssh(cmd *cobra.Command, args []string) error {
}
}
err = vm.SSH(vmName, sshOpts)
state, err := provider.State(mc, false)
if err != nil {
return err
}
if state != define.Running {
return fmt.Errorf("vm %q is not running", mc.Name)
}
username := sshOpts.Username
if username == "" {
username = mc.SSH.RemoteUsername
}
err = machine.CommonSSH(username, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, sshOpts.Args)
return utils.HandleOSExecError(err)
}
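The new flow resolves the username and SSH settings from the machine config and hands them to machine.CommonSSH. As an illustration only, a hypothetical approximation of the kind of ssh invocation such a helper assembles (the flags and the localhost target are assumptions, not this PR's code):

package main

import (
	"os"
	"os/exec"
	"strconv"
)

// sshCommand is a hypothetical illustration, not the PR's implementation.
func sshCommand(username, identityPath string, port int, args []string) *exec.Cmd {
	cmdArgs := []string{
		"-i", identityPath,
		"-p", strconv.Itoa(port),
		"-o", "StrictHostKeyChecking=no",
		username + "@localhost", // machine SSH is reached via a forwarded local port
	}
	cmdArgs = append(cmdArgs, args...)
	cmd := exec.Command("ssh", cmdArgs...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd
}

func main() {
	// hypothetical values mirroring mc.SSH fields
	cmd := sshCommand("core", "/home/user/.local/share/containers/podman/machine/id_ed25519", 50000, nil)
	_ = cmd // run with cmd.Run() against a real machine
}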


@ -8,6 +8,10 @@ import (
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -42,7 +46,6 @@ func init() {
func start(_ *cobra.Command, args []string) error {
var (
err error
vm machine.VM
)
startOpts.NoInfo = startOpts.Quiet || startOpts.NoInfo
@ -52,25 +55,46 @@ func start(_ *cobra.Command, args []string) error {
vmName = args[0]
}
vm, err = provider.LoadVMByName(vmName)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
mc, err := vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return err
}
active, activeName, cerr := provider.CheckExclusiveActiveVM()
if cerr != nil {
return cerr
state, err := provider.State(mc, false)
if err != nil {
return err
}
if active {
if vmName == activeName {
return fmt.Errorf("cannot start VM %s: %w", vmName, machine.ErrVMAlreadyRunning)
}
return fmt.Errorf("cannot start VM %s. VM %s is currently running or starting: %w", vmName, activeName, machine.ErrMultipleActiveVM)
if state == define.Running {
return define.ErrVMAlreadyRunning
}
if err := shim.CheckExclusiveActiveVM(provider, mc); err != nil {
return err
}
if !startOpts.Quiet {
fmt.Printf("Starting machine %q\n", vmName)
}
if err := vm.Start(vmName, startOpts); err != nil {
// Set starting to true
mc.Starting = true
if err := mc.Write(); err != nil {
logrus.Error(err)
}
// Set starting to false on exit
defer func() {
mc.Starting = false
if err := mc.Write(); err != nil {
logrus.Error(err)
}
}()
if err := shim.Start(mc, provider, dirs, startOpts); err != nil {
return err
}
fmt.Printf("Machine %q started successfully\n", vmName)


@ -4,10 +4,14 @@ package machine
import (
"fmt"
"time"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -35,7 +39,6 @@ func init() {
func stop(cmd *cobra.Command, args []string) error {
var (
err error
vm machine.VM
)
vmName := defaultMachineName
@ -43,13 +46,25 @@ func stop(cmd *cobra.Command, args []string) error {
vmName = args[0]
}
vm, err = provider.LoadVMByName(vmName)
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
if err := vm.Stop(vmName, machine.StopOptions{}); err != nil {
mc, err := vmconfigs.LoadMachineByName(vmName, dirs)
if err != nil {
return err
}
if err := shim.Stop(mc, provider, dirs, false); err != nil {
return err
}
// Update last time up
mc.LastUp = time.Now()
if err := mc.Write(); err != nil {
logrus.Errorf("unable to write configuration file: %q", err)
}
fmt.Printf("Machine %q stopped successfully\n", vmName)
newMachineEvent(events.Stop, events.Event{Name: vmName})
return nil


@ -3,7 +3,14 @@
package system
import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/connection"
"github.com/containers/podman/v4/pkg/machine/define"
p "github.com/containers/podman/v4/pkg/machine/provider"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/utils"
"github.com/sirupsen/logrus"
)
func resetMachine() error {
@ -11,5 +18,58 @@ func resetMachine() error {
if err != nil {
return err
}
return provider.RemoveAndCleanMachines()
dirs, err := machine.GetMachineDirs(provider.VMType())
if err != nil {
return err
}
mcs, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
// Note: the reason we might be cleaning is that a JSON file is messed
// up and unreadable. This should not be fatal. Keep going ...
logrus.Errorf("unable to load machines: %q", err)
}
for _, mc := range mcs {
state, err := provider.State(mc, false)
if err != nil {
logrus.Errorf("unable to determine state of %s: %q", mc.Name, err)
}
if state == define.Running {
if err := shim.Stop(mc, provider, dirs, true); err != nil {
logrus.Errorf("unable to stop running machine %s: %q", mc.Name, err)
}
}
if err := connection.RemoveConnections(mc.Name, mc.Name+"-root"); err != nil {
logrus.Error(err)
}
// the thinking here is that we don't need to remove machine-specific files because
// we will nuke them all at the end of this. Just do what the provider needs
_, providerRm, err := provider.Remove(mc)
if err != nil {
logrus.Errorf("unable to prepare provider machine removal: %q", err)
}
if err := providerRm(); err != nil {
logrus.Errorf("unable remove machine %s from provider: %q", mc.Name, err)
}
}
if err := utils.GuardedRemoveAll(dirs.DataDir.GetPath()); err != nil {
logrus.Errorf("unable to remove machine data dir %q: %q", dirs.DataDir.GetPath(), err)
}
if err := utils.GuardedRemoveAll(dirs.RuntimeDir.GetPath()); err != nil {
logrus.Errorf("unable to remove machine runtime dir %q: %q", dirs.RuntimeDir.GetPath(), err)
}
if err := utils.GuardedRemoveAll(dirs.ConfigDir.GetPath()); err != nil {
logrus.Errorf("unable to remove machine config dir %q: %q", dirs.ConfigDir.GetPath(), err)
}
// Just in case a provider needs something general done
return provider.RemoveAndCleanMachines(dirs)
}

go.mod

@ -74,6 +74,7 @@ require (
golang.org/x/text v0.14.0
google.golang.org/protobuf v1.32.0
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/kubernetes v1.28.4
sigs.k8s.io/yaml v1.4.0
@ -216,7 +217,6 @@ require (
google.golang.org/grpc v1.59.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect
)


@ -1,83 +0,0 @@
//go:build darwin
package applehv
import (
"fmt"
"io"
"io/fs"
"net"
"os"
"os/user"
"path/filepath"
"time"
)
// TODO the following functions were taken from pkg/qemu/claim_darwin.go and
// should be refactored. I'm thinking even something in pkg/machine/
func dockerClaimSupported() bool {
return true
}
func dockerClaimHelperInstalled() bool {
u, err := user.Current()
if err != nil {
return false
}
labelName := fmt.Sprintf("com.github.containers.podman.helper-%s", u.Username)
fileName := filepath.Join("/Library", "LaunchDaemons", labelName+".plist")
info, err := os.Stat(fileName)
return err == nil && info.Mode().IsRegular()
}
func claimDockerSock() bool {
u, err := user.Current()
if err != nil {
return false
}
helperSock := fmt.Sprintf("/var/run/podman-helper-%s.socket", u.Username)
con, err := net.DialTimeout("unix", helperSock, time.Second*5)
if err != nil {
return false
}
_ = con.SetWriteDeadline(time.Now().Add(time.Second * 5))
_, err = fmt.Fprintln(con, "GO")
if err != nil {
return false
}
_ = con.SetReadDeadline(time.Now().Add(time.Second * 5))
read, err := io.ReadAll(con)
return err == nil && string(read) == "OK"
}
func findClaimHelper() string {
exe, err := os.Executable()
if err != nil {
return ""
}
exe, err = filepath.EvalSymlinks(exe)
if err != nil {
return ""
}
return filepath.Join(filepath.Dir(exe), "podman-mac-helper")
}
func checkSockInUse(sock string) bool {
if info, err := os.Stat(sock); err == nil && info.Mode()&fs.ModeSocket == fs.ModeSocket {
_, err = net.DialTimeout("unix", dockerSock, dockerConnectTimeout)
return err == nil
}
return false
}
func alreadyLinked(target string, link string) bool {
read, err := os.Readlink(link)
return err == nil && read == target
}


@ -2,200 +2,7 @@
package applehv
import (
"errors"
"fmt"
"io/fs"
"path/filepath"
"time"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
vfConfig "github.com/crc-org/vfkit/pkg/config"
"github.com/docker/go-units"
"golang.org/x/sys/unix"
)
const (
localhostURI = "http://localhost"
ignitionSocketName = "ignition.sock"
)
type AppleHVVirtualization struct {
machine.Virtualization
}
type MMHardwareConfig struct {
CPUs uint16
DiskPath string
DiskSize uint64
Memory int32
}
func VirtualizationProvider() machine.VirtProvider {
return &AppleHVVirtualization{
machine.NewVirtualization(define.AppleHV, compression.Xz, define.Raw, vmtype),
}
}
func (v AppleHVVirtualization) CheckExclusiveActiveVM() (bool, string, error) {
fsVms, err := getVMInfos()
if err != nil {
return false, "", err
}
for _, vm := range fsVms {
if vm.Running || vm.Starting {
return true, vm.Name, nil
}
}
return false, "", nil
}
func (v AppleHVVirtualization) IsValidVMName(name string) (bool, error) {
configDir, err := machine.GetConfDir(define.AppleHvVirt)
if err != nil {
return false, err
}
fqName := filepath.Join(configDir, fmt.Sprintf("%s.json", name))
if _, err := loadMacMachineFromJSON(fqName); err != nil {
return false, err
}
return true, nil
}
func (v AppleHVVirtualization) List(opts machine.ListOptions) ([]*machine.ListResponse, error) {
var (
response []*machine.ListResponse
)
mms, err := v.loadFromLocalJson()
if err != nil {
return nil, err
}
for _, mm := range mms {
vmState, err := mm.Vfkit.State()
if err != nil {
if errors.Is(err, unix.ECONNREFUSED) {
vmState = define.Stopped
} else {
return nil, err
}
}
mlr := machine.ListResponse{
Name: mm.Name,
CreatedAt: mm.Created,
LastUp: mm.LastUp,
Running: vmState == define.Running,
Starting: vmState == define.Starting,
Stream: mm.ImageStream,
VMType: define.AppleHvVirt.String(),
CPUs: mm.CPUs,
Memory: mm.Memory * units.MiB,
DiskSize: mm.DiskSize * units.GiB,
Port: mm.Port,
RemoteUsername: mm.RemoteUsername,
IdentityPath: mm.IdentityPath,
}
response = append(response, &mlr)
}
return response, nil
}
func (v AppleHVVirtualization) LoadVMByName(name string) (machine.VM, error) {
m := MacMachine{Name: name}
return m.loadFromFile()
}
func (v AppleHVVirtualization) NewMachine(opts machine.InitOptions) (machine.VM, error) {
m := MacMachine{Name: opts.Name}
if len(opts.USBs) > 0 {
return nil, fmt.Errorf("USB host passthrough is not supported for applehv machines")
}
configDir, err := machine.GetConfDir(define.AppleHvVirt)
if err != nil {
return nil, err
}
configPath, err := define.NewMachineFile(getVMConfigPath(configDir, opts.Name), nil)
if err != nil {
return nil, err
}
m.ConfigPath = *configPath
dataDir, err := machine.GetDataDir(define.AppleHvVirt)
if err != nil {
return nil, err
}
if err := ignition.SetIgnitionFile(&m.IgnitionFile, vmtype, m.Name, configDir); err != nil {
return nil, err
}
// Set creation time
m.Created = time.Now()
m.ResourceConfig = vmconfigs.ResourceConfig{
CPUs: opts.CPUS,
DiskSize: opts.DiskSize,
// Diskpath will be needed
Memory: opts.Memory,
}
bl := vfConfig.NewEFIBootloader(fmt.Sprintf("%s/%ss", dataDir, opts.Name), true)
m.Vfkit.VirtualMachine = vfConfig.NewVirtualMachine(uint(opts.CPUS), opts.Memory, bl)
if err := m.writeConfig(); err != nil {
return nil, err
}
return m.loadFromFile()
}
func (v AppleHVVirtualization) RemoveAndCleanMachines() error {
// This can be implemented when host networking is completed.
return machine.ErrNotImplemented
}
func (v AppleHVVirtualization) VMType() define.VMType {
return vmtype
}
func (v AppleHVVirtualization) loadFromLocalJson() ([]*MacMachine, error) {
var (
jsonFiles []string
mms []*MacMachine
)
configDir, err := machine.GetConfDir(v.VMType())
if err != nil {
return nil, err
}
if err := filepath.WalkDir(configDir, func(input string, d fs.DirEntry, e error) error {
if e != nil {
return e
}
if filepath.Ext(d.Name()) == ".json" {
jsonFiles = append(jsonFiles, input)
}
return nil
}); err != nil {
return nil, err
}
for _, jsonFile := range jsonFiles {
mm, err := loadMacMachineFromJSON(jsonFile)
if err != nil {
return nil, err
}
if err != nil {
return nil, err
}
mms = append(mms, mm)
}
return mms, nil
}


@ -7,14 +7,20 @@ import (
"net/http"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
// serveIgnitionOverSock allows podman to open a small httpd instance on the vsock between the host
// and guest to inject the ignition file into FCOS
func (m *MacMachine) serveIgnitionOverSock(ignitionSocket *define.VMFile) error {
logrus.Debugf("reading ignition file: %s", m.IgnitionFile.GetPath())
ignFile, err := m.IgnitionFile.Read()
func serveIgnitionOverSock(ignitionSocket *define.VMFile, mc *vmconfigs.MachineConfig) error {
ignitionFile, err := mc.IgnitionFile()
if err != nil {
return err
}
logrus.Debugf("reading ignition file: %s", ignitionFile.GetPath())
ignFile, err := ignitionFile.Read()
if err != nil {
return err
}
@ -22,7 +28,7 @@ func (m *MacMachine) serveIgnitionOverSock(ignitionSocket *define.VMFile) error
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write(ignFile)
if err != nil {
logrus.Error("failed to serve ignition file: %v", err)
logrus.Errorf("failed to serve ignition file: %v", err)
}
})
listener, err := net.Listen("unix", ignitionSocket.GetPath())
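serveIgnitionOverSock serves the ignition payload over a plain HTTP server bound to a unix socket; at first boot the guest reaches it through the vsock device that vfkit attaches. A minimal sketch, assuming a hypothetical socket path, of fetching that payload from the host side for testing:

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	sock := "/tmp/podman-machine-default-ignition.sock" // hypothetical socket path
	client := &http.Client{
		Transport: &http.Transport{
			// dial the unix socket instead of TCP; the URL host is ignored
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
		},
	}
	resp, err := client.Get("http://localhost/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d bytes of ignition\n", len(body))
}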

File diff suppressed because it is too large.


@ -0,0 +1,325 @@
//go:build darwin
package applehv
import (
"context"
"fmt"
"net"
"strconv"
"time"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/strongunits"
gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/applehv/vfkit"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/sockets"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/utils"
vfConfig "github.com/crc-org/vfkit/pkg/config"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// applehvMACAddress is a pre-defined MAC address that vfkit recognizes
// and is required for network flow
const applehvMACAddress = "5a:94:ef:e4:0c:ee"
var (
vfkitCommand = "vfkit"
gvProxyWaitBackoff = 500 * time.Millisecond
gvProxyMaxBackoffAttempts = 6
)
type AppleHVStubber struct {
vmconfigs.AppleHVConfig
}
func (a AppleHVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConfig, ignBuilder *ignition.IgnitionBuilder) error {
mc.AppleHypervisor = new(vmconfigs.AppleHVConfig)
mc.AppleHypervisor.Vfkit = vfkit.VfkitHelper{}
bl := vfConfig.NewEFIBootloader(fmt.Sprintf("%s/efi-bl-%s", opts.Dirs.DataDir.GetPath(), opts.Name), true)
mc.AppleHypervisor.Vfkit.VirtualMachine = vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), mc.Resources.Memory, bl)
randPort, err := utils.GetRandomPort()
if err != nil {
return err
}
mc.AppleHypervisor.Vfkit.Endpoint = localhostURI + ":" + strconv.Itoa(randPort)
var virtiofsMounts []machine.VirtIoFs
for _, mnt := range mc.Mounts {
virtiofsMounts = append(virtiofsMounts, machine.MountToVirtIOFs(mnt))
}
// Populate the ignition file with virtiofs stuff
ignBuilder.WithUnit(generateSystemDFilesForVirtiofsMounts(virtiofsMounts)...)
return resizeDisk(mc, strongunits.GiB(mc.Resources.DiskSize))
}
func (a AppleHVStubber) GetHyperVisorVMs() ([]string, error) {
// not applicable for applehv
return nil, nil
}
func (a AppleHVStubber) MountType() vmconfigs.VolumeMountType {
return vmconfigs.VirtIOFS
}
func (a AppleHVStubber) MountVolumesToVM(_ *vmconfigs.MachineConfig, _ bool) error {
// virtiofs: nothing to do here
return nil
}
func (a AppleHVStubber) RemoveAndCleanMachines(_ *define.MachineDirs) error {
return nil
}
func (a AppleHVStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, cpus, memory *uint64, newDiskSize *strongunits.GiB, newRootful *bool) error {
if newDiskSize != nil {
if err := resizeDisk(mc, *newDiskSize); err != nil {
return err
}
}
if newRootful != nil && mc.HostUser.Rootful != *newRootful {
if err := mc.SetRootful(*newRootful); err != nil {
return err
}
}
// VFKit does not require saving memory, disk, or cpu
return nil
}
func (a AppleHVStubber) StartNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error {
gvProxySock, err := mc.GVProxySocket()
if err != nil {
return err
}
// make sure it does not exist before gvproxy is called
if err := gvProxySock.Delete(); err != nil {
logrus.Error(err)
}
cmd.AddVfkitSocket(fmt.Sprintf("unixgram://%s", gvProxySock.GetPath()))
return nil
}
func (a AppleHVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) {
var (
ignitionSocket *define.VMFile
)
if bl := mc.AppleHypervisor.Vfkit.VirtualMachine.Bootloader; bl == nil {
return nil, nil, fmt.Errorf("unable to determine boot loader for this machine")
}
// Add networking
netDevice, err := vfConfig.VirtioNetNew(applehvMACAddress)
if err != nil {
return nil, nil, err
}
// Set user networking with gvproxy
gvproxySocket, err := mc.GVProxySocket()
if err != nil {
return nil, nil, err
}
// Wait on gvproxy to be running and aware
if err := waitForGvProxy(gvproxySocket); err != nil {
return nil, nil, err
}
netDevice.SetUnixSocketPath(gvproxySocket.GetPath())
readySocket, err := mc.ReadySocket()
if err != nil {
return nil, nil, err
}
logfile, err := mc.LogFile()
if err != nil {
return nil, nil, err
}
// create a one-time virtual machine for starting because we don't want all of this information in the
// machine config if possible; the preference is to derive it at start time
vm := vfConfig.NewVirtualMachine(uint(mc.Resources.CPUs), mc.Resources.Memory, mc.AppleHypervisor.Vfkit.VirtualMachine.Bootloader)
defaultDevices, err := getDefaultDevices(mc.ImagePath.GetPath(), logfile.GetPath(), readySocket.GetPath())
if err != nil {
return nil, nil, err
}
vm.Devices = append(vm.Devices, defaultDevices...)
vm.Devices = append(vm.Devices, netDevice)
mounts, err := virtIOFsToVFKitVirtIODevice(mc.Mounts)
if err != nil {
return nil, nil, err
}
vm.Devices = append(vm.Devices, mounts...)
// To start the VM, we need to call vfkit
cfg, err := config.Default()
if err != nil {
return nil, nil, err
}
vfkitBinaryPath, err := cfg.FindHelperBinary(vfkitCommand, true)
if err != nil {
return nil, nil, err
}
logrus.Debugf("vfkit path is: %s", vfkitBinaryPath)
cmd, err := vm.Cmd(vfkitBinaryPath)
if err != nil {
return nil, nil, err
}
vfkitEndpointArgs, err := getVfKitEndpointCMDArgs(mc.AppleHypervisor.Vfkit.Endpoint)
if err != nil {
return nil, nil, err
}
machineDataDir, err := mc.DataDir()
if err != nil {
return nil, nil, err
}
cmd.Args = append(cmd.Args, vfkitEndpointArgs...)
firstBoot, err := mc.IsFirstBoot()
if err != nil {
return nil, nil, err
}
if logrus.IsLevelEnabled(logrus.DebugLevel) {
debugDevArgs, err := getDebugDevicesCMDArgs()
if err != nil {
return nil, nil, err
}
cmd.Args = append(cmd.Args, debugDevArgs...)
cmd.Args = append(cmd.Args, "--gui") // add command line switch to pop the gui open
}
if firstBoot {
// If this is the first boot of the vm, we need to add the vsock
// device to vfkit so we can inject the ignition file
socketName := fmt.Sprintf("%s-%s", mc.Name, ignitionSocketName)
ignitionSocket, err = machineDataDir.AppendToNewVMFile(socketName, &socketName)
if err != nil {
return nil, nil, err
}
if err := ignitionSocket.Delete(); err != nil {
logrus.Errorf("unable to delete ignition socket: %q", err)
}
ignitionVsockDeviceCLI, err := getIgnitionVsockDeviceAsCLI(ignitionSocket.GetPath())
if err != nil {
return nil, nil, err
}
cmd.Args = append(cmd.Args, ignitionVsockDeviceCLI...)
logrus.Debug("first boot detected")
logrus.Debugf("serving ignition file over %s", ignitionSocket.GetPath())
go func() {
if err := serveIgnitionOverSock(ignitionSocket, mc); err != nil {
logrus.Error(err)
}
logrus.Debug("ignition vsock server exited")
}()
}
logrus.Debugf("listening for ready on: %s", readySocket.GetPath())
if err := readySocket.Delete(); err != nil {
logrus.Warnf("unable to delete previous ready socket: %q", err)
}
readyListen, err := net.Listen("unix", readySocket.GetPath())
if err != nil {
return nil, nil, err
}
logrus.Debug("waiting for ready notification")
readyChan := make(chan error)
go sockets.ListenAndWaitOnSocket(readyChan, readyListen)
logrus.Debugf("vfkit command-line: %v", cmd.Args)
if err := cmd.Start(); err != nil {
return nil, nil, err
}
returnFunc := func() error {
processErrChan := make(chan error)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
defer close(processErrChan)
for {
select {
case <-ctx.Done():
return
default:
}
if err := checkProcessRunning("vfkit", cmd.Process.Pid); err != nil {
processErrChan <- err
return
}
// let's poll status every half second
time.Sleep(500 * time.Millisecond)
}
}()
// wait for either the ready socket to report or the process to have exited
select {
case err := <-processErrChan:
if err != nil {
return err
}
case err := <-readyChan:
if err != nil {
return err
}
logrus.Debug("ready notification received")
}
return nil
}
return cmd.Process.Release, returnFunc, nil
}
func (a AppleHVStubber) StopHostNetworking(_ *vmconfigs.MachineConfig, _ define.VMType) error {
return nil
}
func (a AppleHVStubber) VMType() define.VMType {
return define.AppleHvVirt
}
func waitForGvProxy(gvproxySocket *define.VMFile) error {
backoffWait := gvProxyWaitBackoff
logrus.Debug("checking that gvproxy is running")
for i := 0; i < gvProxyMaxBackoffAttempts; i++ {
err := unix.Access(gvproxySocket.GetPath(), unix.W_OK)
if err == nil {
return nil
}
time.Sleep(backoffWait)
backoffWait *= 2
}
return fmt.Errorf("unable to connect to gvproxy %q", gvproxySocket.GetPath())
}
func (a AppleHVStubber) PrepareIgnition(_ *vmconfigs.MachineConfig, _ *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error) {
return nil, nil
}
func (a AppleHVStubber) PostStartNetworking(mc *vmconfigs.MachineConfig) error {
return nil
}
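
StartVM returns two functions rather than doing all of the work itself: the first releases the parent's handle on the vfkit child process, and the second blocks until either the ready socket fires or vfkit exits. A minimal, hypothetical caller (not part of this PR) might wire them up as sketched below; the darwin build tag of the package applies.

//go:build darwin

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine/applehv"
	"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)

// startAndWait is a sketch of a caller: wait for the guest to report ready,
// then release the handle on the vfkit child process.
func startAndWait(stubber applehv.AppleHVStubber, mc *vmconfigs.MachineConfig) error {
	release, waitReady, err := stubber.StartVM(mc)
	if err != nil {
		return err
	}
	if err := waitReady(); err != nil {
		return err
	}
	return release()
}

func main() {
	log.Println("see startAndWait; building a real MachineConfig is out of scope for this sketch")
}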

View File

@ -3,10 +3,11 @@
package applehv
import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
vfConfig "github.com/crc-org/vfkit/pkg/config"
)
// TODO this signature could be an machineconfig
func getDefaultDevices(imagePath, logPath, readyPath string) ([]vfConfig.VirtioDevice, error) {
var devices []vfConfig.VirtioDevice
@ -53,11 +54,14 @@ func getIgnitionVsockDevice(path string) (vfConfig.VirtioDevice, error) {
return vfConfig.VirtioVsockNew(1024, path, true)
}
func VirtIOFsToVFKitVirtIODevice(fs machine.VirtIoFs) vfConfig.VirtioFs {
return vfConfig.VirtioFs{
DirectorySharingConfig: vfConfig.DirectorySharingConfig{
MountTag: fs.Tag,
},
SharedDir: fs.Source,
func virtIOFsToVFKitVirtIODevice(mounts []*vmconfigs.Mount) ([]vfConfig.VirtioDevice, error) {
var virtioDevices []vfConfig.VirtioDevice
for _, vol := range mounts {
virtfsDevice, err := vfConfig.VirtioFsNew(vol.Source, vol.Tag)
if err != nil {
return nil, err
}
virtioDevices = append(virtioDevices, virtfsDevice)
}
return virtioDevices, nil
}

View File

@ -57,6 +57,9 @@ func (vf *VfkitHelper) getRawState() (define.Status, error) {
if err != nil {
return "", err
}
if err := serverResponse.Body.Close(); err != nil {
logrus.Error(err)
}
return ToMachineStatus(response.State)
}
@ -66,7 +69,7 @@ func (vf *VfkitHelper) getRawState() (define.Status, error) {
func (vf *VfkitHelper) State() (define.Status, error) {
vmState, err := vf.getRawState()
if err == nil {
return vmState, err
return vmState, nil
}
if errors.Is(err, unix.ECONNREFUSED) {
return define.Stopped, nil
@ -107,7 +110,7 @@ func (vf *VfkitHelper) Stop(force, wait bool) error {
waitErr = nil
break
}
waitDuration = waitDuration * 2
waitDuration *= 2
logrus.Debugf("backoff wait time: %s", waitDuration.String())
time.Sleep(waitDuration)
}

View File

@ -0,0 +1,117 @@
package compression
import (
"bytes"
"io"
"os"
)
// TODO vendor this in ... pkg/os directory is small and code should be negligible
/*
NOTE: copy.go and its test were lifted from github.com/crc-org/crc because
I was having trouble getting Go to vendor it properly. All credit to them.
*/
func copyFile(src, dst string, sparse bool) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
if sparse {
if _, err = CopySparse(out, in); err != nil {
return err
}
} else {
if _, err = io.Copy(out, in); err != nil {
return err
}
}
fi, err := os.Stat(src)
if err != nil {
return err
}
if err = os.Chmod(dst, fi.Mode()); err != nil {
return err
}
return out.Close()
}
func CopyFile(src, dst string) error {
return copyFile(src, dst, false)
}
func CopyFileSparse(src, dst string) error {
return copyFile(src, dst, true)
}
func CopySparse(dst io.WriteSeeker, src io.Reader) (int64, error) {
copyBuf := make([]byte, copyChunkSize)
sparseWriter := newSparseWriter(dst)
bytesWritten, err := io.CopyBuffer(sparseWriter, src, copyBuf)
if err != nil {
return bytesWritten, err
}
err = sparseWriter.Close()
return bytesWritten, err
}
type sparseWriter struct {
writer io.WriteSeeker
lastChunkSparse bool
}
func newSparseWriter(writer io.WriteSeeker) *sparseWriter {
return &sparseWriter{writer: writer}
}
const copyChunkSize = 4096
var emptyChunk = make([]byte, copyChunkSize)
func isEmptyChunk(p []byte) bool {
// HasPrefix instead of bytes.Equal in order to handle the last chunk
// of the file, which may be shorter than len(emptyChunk), and would
// fail bytes.Equal()
return bytes.HasPrefix(emptyChunk, p)
}
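// Illustration (not part of this file): bytes.Equal would reject a short,
// all-zero final chunk because the lengths differ, while HasPrefix accepts it:
//
//	empty := make([]byte, 4096) // copyChunkSize
//	tail := make([]byte, 100)   // short all-zero final chunk
//	bytes.Equal(empty, tail)     // false
//	bytes.HasPrefix(empty, tail) // true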
func (w *sparseWriter) Write(p []byte) (n int, err error) {
if isEmptyChunk(p) {
offset, err := w.writer.Seek(int64(len(p)), io.SeekCurrent)
if err != nil {
w.lastChunkSparse = false
return 0, err
}
_ = offset
w.lastChunkSparse = true
return len(p), nil
}
w.lastChunkSparse = false
return w.writer.Write(p)
}
func (w *sparseWriter) Close() error {
if w.lastChunkSparse {
if _, err := w.writer.Seek(-1, io.SeekCurrent); err != nil {
return err
}
if _, err := w.writer.Write([]byte{0}); err != nil {
return err
}
}
return nil
}
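
As a usage sketch only (the paths and the external import of this package are made up), the exported helpers are meant to be called like this:

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine/compression"
)

func main() {
	// Copy a raw disk image while seeking over all-zero 4 KiB chunks so the
	// destination stays sparse on disk; paths here are illustrative.
	if err := compression.CopyFileSparse("/tmp/disk.raw", "/tmp/disk-copy.raw"); err != nil {
		log.Fatal(err)
	}
}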

View File

@ -0,0 +1,52 @@
package compression
import (
"os"
"path/filepath"
"testing"
)
func TestCopyFile(t *testing.T) {
testStr := "test-machine"
srcFile, err := os.CreateTemp("", "machine-test-")
if err != nil {
t.Fatal(err)
}
srcFi, err := srcFile.Stat()
if err != nil {
t.Fatal(err)
}
_, _ = srcFile.Write([]byte(testStr)) //nolint:mirror
srcFile.Close()
srcFilePath := filepath.Join(os.TempDir(), srcFi.Name())
destFile, err := os.CreateTemp("", "machine-copy-test-")
if err != nil {
t.Fatal(err)
}
destFi, err := destFile.Stat()
if err != nil {
t.Fatal(err)
}
destFile.Close()
destFilePath := filepath.Join(os.TempDir(), destFi.Name())
if err := CopyFile(srcFilePath, destFilePath); err != nil {
t.Fatal(err)
}
data, err := os.ReadFile(destFilePath)
if err != nil {
t.Fatal(err)
}
if string(data) != testStr {
t.Fatalf("expected data \"%s\"; received \"%s\"", testStr, string(data))
}
}

View File

@ -3,7 +3,9 @@ package compression
import (
"archive/zip"
"bufio"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"os/exec"
@ -19,12 +21,20 @@ import (
"github.com/ulikunitz/xz"
)
// Decompress is a generic wrapper for various decompression algos
// TODO this needs some love. In the various decompression functions that are
// called, the same uncompressed path is being opened multiple times.
func Decompress(localPath *define.VMFile, uncompressedPath string) error {
var isZip bool
uncompressedFileWriter, err := os.OpenFile(uncompressedPath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
return err
}
defer func() {
if err := uncompressedFileWriter.Close(); err != nil {
logrus.Errorf("unable to to close decompressed file %s: %q", uncompressedPath, err)
}
}()
sourceFile, err := localPath.Read()
if err != nil {
return err
@ -32,19 +42,50 @@ func Decompress(localPath *define.VMFile, uncompressedPath string) error {
if strings.HasSuffix(localPath.GetPath(), ".zip") {
isZip = true
}
prefix := "Copying uncompressed file"
compressionType := archive.DetectCompression(sourceFile)
if compressionType != archive.Uncompressed || isZip {
prefix = "Extracting compressed file"
}
prefix := "Extracting compressed file"
prefix += ": " + filepath.Base(uncompressedPath)
if compressionType == archive.Xz {
switch compressionType {
case archive.Xz:
return decompressXZ(prefix, localPath.GetPath(), uncompressedFileWriter)
case archive.Uncompressed:
if isZip && runtime.GOOS == "windows" {
return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter)
}
// here we should just do a copy
srcFile, err := os.Open(localPath.GetPath())
if err != nil {
return err
}
defer srcFile.Close()
fmt.Printf("Copying uncompressed file %q to %q\n", localPath.GetPath(), uncompressedPath)
_, err = CopySparse(uncompressedFileWriter, srcFile)
return err
case archive.Gzip:
if runtime.GOOS == "darwin" {
return decompressGzWithSparse(prefix, localPath, uncompressedPath)
}
fallthrough
default:
return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter)
}
if isZip && runtime.GOOS == "windows" {
return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter)
}
return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter)
// if compressionType != archive.Uncompressed || isZip {
// prefix = "Extracting compressed file"
// }
// prefix += ": " + filepath.Base(uncompressedPath)
// if compressionType == archive.Xz {
// return decompressXZ(prefix, localPath.GetPath(), uncompressedFileWriter)
// }
// if isZip && runtime.GOOS == "windows" {
// return decompressZip(prefix, localPath.GetPath(), uncompressedFileWriter)
// }
// Unfortunately GZ is not sparse capable. Lets handle it differently
// if compressionType == archive.Gzip && runtime.GOOS == "darwin" {
// return decompressGzWithSparse(prefix, localPath, uncompressedPath)
// }
// return decompressEverythingElse(prefix, localPath.GetPath(), uncompressedFileWriter)
}
// Will error out if file without .Xz already exists
@ -182,3 +223,56 @@ func decompressZip(prefix string, src string, output io.WriteCloser) error {
p.Wait()
return err
}
func decompressGzWithSparse(prefix string, compressedPath *define.VMFile, uncompressedPath string) error {
stat, err := os.Stat(compressedPath.GetPath())
if err != nil {
return err
}
dstFile, err := os.OpenFile(uncompressedPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, stat.Mode())
if err != nil {
return err
}
defer func() {
if err := dstFile.Close(); err != nil {
logrus.Errorf("unable to close uncompressed file %s: %q", uncompressedPath, err)
}
}()
f, err := os.Open(compressedPath.GetPath())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
logrus.Errorf("unable to close on compressed file %s: %q", compressedPath.GetPath(), err)
}
}()
gzReader, err := gzip.NewReader(f)
if err != nil {
return err
}
defer func() {
if err := gzReader.Close(); err != nil {
logrus.Errorf("unable to close gzreader: %q", err)
}
}()
// TODO remove the following line when progress bars work
_ = prefix
// p, bar := utils.ProgressBar(prefix, stat.Size(), prefix+": done")
// proxyReader := bar.ProxyReader(f)
// defer func() {
// if err := proxyReader.Close(); err != nil {
// logrus.Error(err)
// }
// }()
logrus.Debugf("decompressing %s", compressedPath.GetPath())
_, err = CopySparse(dstFile, gzReader)
logrus.Debug("decompression complete")
// p.Wait()
return err
}
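
Decompress is what the e2e suite later in this diff drives; a minimal sketch of the same call, with made-up paths:

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine/compression"
	"github.com/containers/podman/v4/pkg/machine/define"
)

func main() {
	// Hypothetical paths: a downloaded, gzip-compressed FCOS image and the
	// uncompressed target the provider will boot from.
	compressed, err := define.NewMachineFile("/tmp/fedora-coreos-applehv.aarch64.raw.gz", nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := compression.Decompress(compressed, "/tmp/fedora-coreos-applehv.aarch64.raw"); err != nil {
		log.Fatal(err)
	}
}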

View File

@ -22,36 +22,14 @@ import (
"github.com/sirupsen/logrus"
)
type InitOptions struct {
CPUS uint64
DiskSize uint64
IgnitionPath string
ImagePath string
Volumes []string
VolumeDriver string
IsDefault bool
Memory uint64
Name string
TimeZone string
URI url.URL
Username string
ReExec bool
Rootful bool
UID string // uid of the user that called machine
UserModeNetworking *bool // nil = use backend/system default, false = disable, true = enable
USBs []string
}
const (
DefaultMachineName string = "podman-machine-default"
apiUpTimeout = 20 * time.Second
)
type RemoteConnectionType string
var (
SSHRemoteConnection RemoteConnectionType = "ssh"
ForwarderBinaryName = "gvproxy"
DefaultIgnitionUserName = "core"
ForwarderBinaryName = "gvproxy"
)
type Download struct {
@ -120,7 +98,7 @@ type RemoveOptions struct {
type InspectOptions struct{}
type VM interface {
Init(opts InitOptions) (bool, error)
Init(opts define.InitOptions) (bool, error)
Inspect() (*InspectInfo, error)
Remove(name string, opts RemoveOptions) (string, func() error, error)
Set(name string, opts SetOptions) ([]error, error)
@ -130,24 +108,6 @@ type VM interface {
Stop(name string, opts StopOptions) error
}
func GetLock(name string, vmtype define.VMType) (*lockfile.LockFile, error) {
// FIXME: there's a painful amount of `GetConfDir` calls scattered
// across the code base. This should be done once and stored
// somewhere instead.
vmConfigDir, err := GetConfDir(vmtype)
if err != nil {
return nil, err
}
lockPath := filepath.Join(vmConfigDir, name+".lock")
lock, err := lockfile.GetLockFile(lockPath)
if err != nil {
return nil, fmt.Errorf("creating lockfile for VM: %w", err)
}
return lock, nil
}
type DistributionDownload interface {
HasUsableCache() (bool, error)
Get() *Download
@ -167,26 +127,6 @@ type InspectInfo struct {
Rootful bool
}
func (rc RemoteConnectionType) MakeSSHURL(host, path, port, userName string) url.URL {
// TODO Should this function have input verification?
userInfo := url.User(userName)
uri := url.URL{
Scheme: "ssh",
Opaque: "",
User: userInfo,
Host: host,
Path: path,
RawPath: "",
ForceQuery: false,
RawQuery: "",
Fragment: "",
}
if len(port) > 0 {
uri.Host = net.JoinHostPort(uri.Hostname(), port)
}
return uri
}
// GetCacheDir returns the dir where VM images are downloaded into when pulled
func GetCacheDir(vmType define.VMType) (string, error) {
dataDir, err := GetDataDir(vmType)
@ -226,6 +166,55 @@ func GetGlobalDataDir() (string, error) {
return dataDir, os.MkdirAll(dataDir, 0755)
}
func GetMachineDirs(vmType define.VMType) (*define.MachineDirs, error) {
rtDir, err := getRuntimeDir()
if err != nil {
return nil, err
}
rtDir = filepath.Join(rtDir, "podman")
configDir, err := GetConfDir(vmType)
if err != nil {
return nil, err
}
configDirFile, err := define.NewMachineFile(configDir, nil)
if err != nil {
return nil, err
}
dataDir, err := GetDataDir(vmType)
if err != nil {
return nil, err
}
dataDirFile, err := define.NewMachineFile(dataDir, nil)
if err != nil {
return nil, err
}
rtDirFile, err := define.NewMachineFile(rtDir, nil)
if err != nil {
return nil, err
}
dirs := define.MachineDirs{
ConfigDir: configDirFile,
DataDir: dataDirFile,
RuntimeDir: rtDirFile,
}
// make sure all machine dirs are present
if err := os.MkdirAll(rtDir, 0755); err != nil {
return nil, err
}
if err := os.MkdirAll(configDir, 0755); err != nil {
return nil, err
}
err = os.MkdirAll(dataDir, 0755)
return &dirs, err
}
// DataDirPrefix returns the path prefix for all machine data files
func DataDirPrefix() (string, error) {
data, err := homedir.GetDataHome()
@ -299,20 +288,6 @@ const (
DockerGlobal
)
type VirtProvider interface { //nolint:interfacebloat
Artifact() define.Artifact
CheckExclusiveActiveVM() (bool, string, error)
Compression() compression.ImageCompression
Format() define.ImageFormat
IsValidVMName(name string) (bool, error)
List(opts ListOptions) ([]*ListResponse, error)
LoadVMByName(name string) (VM, error)
NewMachine(opts InitOptions) (VM, error)
NewDownload(vmName string) (Download, error)
RemoveAndCleanMachines() error
VMType() define.VMType
}
type Virtualization struct {
artifact define.Artifact
compression compression.ImageCompression
@ -465,3 +440,22 @@ func (dl Download) AcquireVMImage(imagePath string) (*define.VMFile, FCOSStream,
}
return imageLocation, fcosStream, nil
}
// Deprecated: GetLock
func GetLock(name string, vmtype define.VMType) (*lockfile.LockFile, error) {
// FIXME: there's a painful amount of `GetConfDir` calls scattered
// across the code base. This should be done once and stored
// somewhere instead.
vmConfigDir, err := GetConfDir(vmtype)
if err != nil {
return nil, err
}
lockPath := filepath.Join(vmConfigDir, name+".lock")
lock, err := lockfile.GetLockFile(lockPath)
if err != nil {
return nil, fmt.Errorf("creating lockfile for VM: %w", err)
}
return lock, nil
}

View File

@ -9,6 +9,7 @@ import (
"reflect"
"testing"
"github.com/containers/podman/v4/pkg/machine/connection"
"github.com/stretchr/testify/assert"
)
@ -27,7 +28,7 @@ func TestRemoteConnectionType_MakeSSHURL(t *testing.T) {
}
tests := []struct {
name string
rc RemoteConnectionType
rc connection.RemoteConnectionType
args args
want url.URL
}{

View File

@ -0,0 +1,36 @@
package connection
import (
"fmt"
"net/url"
"strconv"
"github.com/containers/podman/v4/pkg/machine/define"
)
// AddSSHConnectionsToPodmanSocket adds SSH connections to the podman socket if
// no ignition path is provided
func AddSSHConnectionsToPodmanSocket(uid, port int, identityPath, name, remoteUsername string, opts define.InitOptions) error {
if len(opts.IgnitionPath) > 0 {
fmt.Println("An ignition path was provided. No SSH connection was added to Podman")
return nil
}
uri := SSHRemoteConnection.MakeSSHURL(LocalhostIP, fmt.Sprintf("/run/user/%d/podman/podman.sock", uid), strconv.Itoa(port), remoteUsername)
uriRoot := SSHRemoteConnection.MakeSSHURL(LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(port), "root")
uris := []url.URL{uri, uriRoot}
names := []string{name, name + "-root"}
// The first connection defined when connections is empty will become the default
// regardless of IsDefault, so order according to rootful
if opts.Rootful {
uris[0], names[0], uris[1], names[1] = uris[1], names[1], uris[0], names[0]
}
for i := 0; i < 2; i++ {
if err := AddConnection(&uris[i], names[i], identityPath, opts.IsDefault && i == 0); err != nil {
return err
}
}
return nil
}

View File

@ -1,10 +1,12 @@
//go:build amd64 || arm64
package machine
package connection
import (
"errors"
"fmt"
"net"
"net/url"
"os"
"github.com/containers/common/pkg/config"
@ -105,3 +107,28 @@ func RemoveFilesAndConnections(files []string, names ...string) {
logrus.Error(err)
}
}
type RemoteConnectionType string
var SSHRemoteConnection RemoteConnectionType = "ssh"
// MakeSSHURL builds an ssh:// connection URL from the given host, socket path, port, and user name
func (rc RemoteConnectionType) MakeSSHURL(host, path, port, userName string) url.URL {
// TODO Should this function have input verification?
userInfo := url.User(userName)
uri := url.URL{
Scheme: "ssh",
Opaque: "",
User: userInfo,
Host: host,
Path: path,
RawPath: "",
ForceQuery: false,
RawQuery: "",
Fragment: "",
}
if len(port) > 0 {
uri.Host = net.JoinHostPort(uri.Hostname(), port)
}
return uri
}
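
For a concrete sense of what MakeSSHURL produces, here is a small sketch with illustrative values (host, port, user, and socket path are made up):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/machine/connection"
)

func main() {
	uri := connection.SSHRemoteConnection.MakeSSHURL(
		"127.0.0.1", "/run/user/1000/podman/podman.sock", "53728", "core")
	fmt.Println(uri.String())
	// Prints: ssh://core@127.0.0.1:53728/run/user/1000/podman/podman.sock
}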

View File

@ -1,4 +1,21 @@
package define
import "os"
const UserCertsTargetPath = "/etc/containers/certs.d"
const DefaultIdentityName = "machine"
var (
DefaultFilePerm os.FileMode = 0644
)
type CreateVMOpts struct {
Name string
Dirs *MachineDirs
}
type MachineDirs struct {
ConfigDir *VMFile
DataDir *VMFile
RuntimeDir *VMFile
}

View File

@ -1,4 +1,4 @@
package machine
package define
import (
"errors"

View File

@ -0,0 +1,23 @@
package define
import "net/url"
type InitOptions struct {
CPUS uint64
DiskSize uint64
IgnitionPath string
ImagePath string
Volumes []string
VolumeDriver string
IsDefault bool
Memory uint64
Name string
TimeZone string
URI url.URL
Username string
ReExec bool
Rootful bool
UID string // uid of the user that called machine
UserModeNetworking *bool // nil = use backend/system default, false = disable, true = enable
USBs []string
}

View File

@ -4,6 +4,8 @@ import (
"errors"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/sirupsen/logrus"
)
@ -46,6 +48,22 @@ func (m *VMFile) Read() ([]byte, error) {
return os.ReadFile(m.GetPath())
}
// ReadPIDFrom reads a PID from the file and returns it as an int. -1 means the pid file
// could not be read or did not contain a value that could be converted to an int
func (m *VMFile) ReadPIDFrom() (int, error) {
vmPidString, err := m.Read()
if err != nil {
return -1, err
}
pid, err := strconv.Atoi(strings.TrimSpace(string(vmPidString)))
if err != nil {
return -1, err
}
// Not returning earlier because -1 means something
return pid, nil
}
// NewMachineFile is a constructor for VMFile
func NewMachineFile(path string, symlink *string) (*VMFile, error) {
if len(path) < 1 {
@ -55,6 +73,7 @@ func NewMachineFile(path string, symlink *string) (*VMFile, error) {
return nil, errors.New("invalid symlink path")
}
mf := VMFile{Path: path}
logrus.Debugf("socket length for %s is %d", path, len(path))
if symlink != nil && len(path) > MaxSocketPathLength {
if err := mf.makeSymlink(symlink); err != nil && !errors.Is(err, os.ErrExist) {
return nil, err
@ -78,3 +97,9 @@ func (m *VMFile) makeSymlink(symlink *string) error {
m.Symlink = &sl
return os.Symlink(m.Path, sl)
}
// AppendToNewVMFile takes a given path and appends it to the existing vmfile path. The new
// VMFile is returned
func (m *VMFile) AppendToNewVMFile(additionalPath string, symlink *string) (*VMFile, error) {
return NewMachineFile(filepath.Join(m.Path, additionalPath), symlink)
}
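
These two helpers compose naturally; the hyperv stubber later in this diff resolves the gvproxy PID in roughly this way (the runtime directory path below is illustrative):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/machine/define"
)

// gvproxyPID appends "gvproxy.pid" onto a runtime-directory VMFile and then
// parses the PID stored in it.
func gvproxyPID(runtimeDir *define.VMFile) (int, error) {
	pidFile, err := runtimeDir.AppendToNewVMFile("gvproxy.pid", nil)
	if err != nil {
		return -1, err
	}
	return pidFile.ReadPIDFrom()
}

func main() {
	rtDir, err := define.NewMachineFile("/run/user/1000/podman", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvproxyPID(rtDir))
}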

View File

@ -33,6 +33,4 @@ Note: To run specific test files, add the test files to the end of the winmake c
### Apple Hypervisor
1. `make podman-remote`
1. `export CONTAINERS_MACHINE_PROVIDER="applehv"`
1. `export MACHINE_IMAGE="https://fedorapeople.org/groups/podman/testing/applehv/arm64/fedora-coreos-38.20230925.dev.0-applehv.aarch64.raw.gz"`
1. `make localmachine` (Add `FOCUS_FILE=basic_test.go` to only run basic test)

View File

@ -236,16 +236,17 @@ func isWSL() bool {
return isVmtype(define.WSLVirt)
}
func getFCOSDownloadLocation(p machine.VirtProvider) string {
dd, err := p.NewDownload("")
if err != nil {
Fail("unable to create new download")
}
fcd, err := dd.GetFCOSDownload(defaultStream)
if err != nil {
Fail("unable to get virtual machine image")
}
return fcd.Location
}
// TODO temporarily suspended
// func getFCOSDownloadLocation(p vmconfigs.VMStubber) string {
// dd, err := p.NewDownload("")
// if err != nil {
// Fail("unable to create new download")
// }
//
// fcd, err := dd.GetFCOSDownload(defaultStream)
// if err != nil {
// Fail("unable to get virtual machine image")
// }
//
// return fcd.Location
// }

View File

@ -4,13 +4,12 @@ package e2e_test
import (
"os/exec"
"github.com/containers/podman/v4/pkg/machine"
)
func getDownloadLocation(p machine.VirtProvider) string {
return getFCOSDownloadLocation(p)
}
// TODO temporarily suspended
// func getDownloadLocation(p machine.VirtProvider) string {
// return getFCOSDownloadLocation(p)
// }
func pgrep(n string) (string, error) {
out, err := exec.Command("pgrep", "gvproxy").Output()

View File

@ -4,26 +4,10 @@ import (
"fmt"
"os/exec"
"strings"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/wsl"
. "github.com/onsi/ginkgo/v2"
)
const podmanBinary = "../../../bin/windows/podman.exe"
func getDownloadLocation(p machine.VirtProvider) string {
if p.VMType() == define.HyperVVirt {
return getFCOSDownloadLocation(p)
}
fd, err := wsl.NewFedoraDownloader(define.WSLVirt, "", defaultStream.String())
if err != nil {
Fail("unable to get WSL virtual image")
}
return fd.Get().URL.String()
}
// pgrep emulates the pgrep linux command
func pgrep(n string) (string, error) {
// add filter to find the process and do no display a header

View File

@ -3,6 +3,7 @@ package e2e_test
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
@ -290,7 +291,7 @@ var _ = Describe("podman machine init", func() {
inspect = inspect.withFormat("{{.ConfigPath.Path}}")
inspectSession, err := mb.setCmd(inspect).run()
Expect(err).ToNot(HaveOccurred())
cfgpth := inspectSession.outputToString()
cfgpth := filepath.Join(inspectSession.outputToString(), fmt.Sprintf("%s.json", name))
inspect = inspect.withFormat("{{.Image.IgnitionFile.Path}}")
inspectSession, err = mb.setCmd(inspect).run()

View File

@ -2,7 +2,6 @@ package e2e_test
import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
jsoniter "github.com/json-iterator/go"
. "github.com/onsi/ginkgo/v2"
@ -66,12 +65,14 @@ var _ = Describe("podman inspect stop", func() {
var inspectInfo []machine.InspectInfo
err = jsoniter.Unmarshal(inspectSession.Bytes(), &inspectInfo)
Expect(err).ToNot(HaveOccurred())
switch testProvider.VMType() {
case define.WSLVirt:
Expect(inspectInfo[0].ConnectionInfo.PodmanPipe.GetPath()).To(ContainSubstring("podman-"))
default:
Expect(inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath()).To(HaveSuffix("podman.sock"))
}
// TODO Re-enable this for tests once inspect is fixed
// switch testProvider.VMType() {
// case define.WSLVirt:
// Expect(inspectInfo[0].ConnectionInfo.PodmanPipe.GetPath()).To(ContainSubstring("podman-"))
// default:
// Expect(inspectInfo[0].ConnectionInfo.PodmanSocket.GetPath()).To(HaveSuffix("podman.sock"))
// }
inspect := new(inspectMachine)
inspect = inspect.withFormat("{{.Name}}")

View File

@ -7,6 +7,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
@ -15,6 +16,7 @@ import (
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/provider"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -47,7 +49,7 @@ func TestMachine(t *testing.T) {
RunSpecs(t, "Podman Machine tests")
}
var testProvider machine.VirtProvider
var testProvider vmconfigs.VMProvider
var _ = BeforeSuite(func() {
var err error
@ -57,14 +59,27 @@ var _ = BeforeSuite(func() {
}
downloadLocation := os.Getenv("MACHINE_IMAGE")
if len(downloadLocation) < 1 {
downloadLocation = getDownloadLocation(testProvider)
// we cannot simply key off the host OS here because hyperv also uses FCOS;
// WSL is the special case
if downloadLocation == "" {
downloadLocation, err = GetDownload(testProvider.VMType())
if err != nil {
Fail("unable to derive download disk from fedora coreos")
}
}
if downloadLocation == "" {
Fail("machine tests require a file reference to a disk image right now")
}
var compressionExtension string
switch testProvider.VMType() {
case define.AppleHvVirt:
compressionExtension = ".gz"
case define.HyperVVirt:
compressionExtension = ".zip"
default:
compressionExtension = ".xz"
}
compressionExtension := fmt.Sprintf(".%s", testProvider.Compression().String())
suiteImageName = strings.TrimSuffix(path.Base(downloadLocation), compressionExtension)
fqImageName = filepath.Join(tmpDir, suiteImageName)
if _, err := os.Stat(fqImageName); err != nil {
@ -82,13 +97,16 @@ var _ = BeforeSuite(func() {
if err != nil {
Fail(fmt.Sprintf("unable to create vmfile %q: %v", fqImageName+compressionExtension, err))
}
decompressionStart := time.Now()
if err := compression.Decompress(diskImage, fqImageName); err != nil {
Fail(fmt.Sprintf("unable to decompress image file: %q", err))
}
GinkgoWriter.Println("decompression took: ", time.Since(decompressionStart))
} else {
Fail(fmt.Sprintf("unable to check for cache image: %q", err))
}
}
})
var _ = SynchronizedAfterSuite(func() {}, func() {})
@ -125,20 +143,34 @@ func setup() (string, *machineTestBuilder) {
if err != nil {
Fail(fmt.Sprintf("failed to create machine test: %q", err))
}
f, err := os.Open(fqImageName)
src, err := os.Open(fqImageName)
if err != nil {
Fail(fmt.Sprintf("failed to open file %s: %q", fqImageName, err))
}
defer func() {
if err := src.Close(); err != nil {
Fail(fmt.Sprintf("failed to close src reader %q: %q", src.Name(), err))
}
}()
mb.imagePath = filepath.Join(homeDir, suiteImageName)
n, err := os.Create(mb.imagePath)
dest, err := os.Create(mb.imagePath)
if err != nil {
Fail(fmt.Sprintf("failed to create file %s: %q", mb.imagePath, err))
}
if _, err := io.Copy(n, f); err != nil {
Fail(fmt.Sprintf("failed to copy %ss to %s: %q", fqImageName, mb.imagePath, err))
}
if err := n.Close(); err != nil {
Fail(fmt.Sprintf("failed to close image copy handler: %q", err))
defer func() {
if err := dest.Close(); err != nil {
Fail(fmt.Sprintf("failed to close destination file %q: %q", dest.Name(), err))
}
}()
fmt.Printf("--> copying %q to %q/n", src.Name(), dest.Name())
if runtime.GOOS != "darwin" {
if _, err := io.Copy(dest, src); err != nil {
Fail(fmt.Sprintf("failed to copy %ss to %s: %q", fqImageName, mb.imagePath, err))
}
} else {
if _, err := compression.CopySparse(dest, src); err != nil {
Fail(fmt.Sprintf("failed to copy %q to %q: %q", src.Name(), dest.Name(), err))
}
}
return homeDir, mb
}

View File

@ -23,6 +23,9 @@ var _ = Describe("podman machine proxy settings propagation", func() {
})
It("ssh to running machine and check proxy settings", func() {
// TODO the proxy test is currently failing on applehv. FIX ME
skipIfVmtype(define.AppleHvVirt, "TODO: this test fails on applehv")
// https://github.com/containers/podman/issues/20129
if testProvider.VMType() == define.HyperVVirt {
Skip("proxy settings not yet supported")

View File

@ -0,0 +1,74 @@
package e2e_test
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/coreos/stream-metadata-go/fedoracoreos"
"github.com/coreos/stream-metadata-go/stream"
"github.com/sirupsen/logrus"
)
func GetDownload(vmType define.VMType) (string, error) {
var (
fcosstable stream.Stream
artifactType, format string
)
url := fedoracoreos.GetStreamURL("testing")
resp, err := http.Get(url.String())
if err != nil {
return "", err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
defer func() {
if err := resp.Body.Close(); err != nil {
logrus.Error(err)
}
}()
if err := json.Unmarshal(body, &fcosstable); err != nil {
return "", err
}
switch vmType {
case define.AppleHvVirt:
artifactType = "applehv"
format = "raw.gz"
case define.HyperVVirt:
artifactType = "hyperv"
format = "vhdx.zip"
default:
artifactType = "qemu"
format = "qcow2.xz"
}
arch, ok := fcosstable.Architectures[machine.GetFcosArch()]
if !ok {
return "", fmt.Errorf("unable to pull VM image: no targetArch in stream")
}
upstreamArtifacts := arch.Artifacts
if upstreamArtifacts == nil {
return "", fmt.Errorf("unable to pull VM image: no artifact in stream")
}
upstreamArtifact, ok := upstreamArtifacts[artifactType]
if !ok {
return "", fmt.Errorf("unable to pull VM image: no %s artifact in stream", artifactType)
}
formats := upstreamArtifact.Formats
if formats == nil {
return "", fmt.Errorf("unable to pull VM image: no formats in stream")
}
formatType, ok := formats[format]
if !ok {
return "", fmt.Errorf("unable to pull VM image: no %s format in stream", format)
}
disk := formatType.Disk
return disk.Location, nil
}

View File

@ -86,6 +86,7 @@ var _ = Describe("podman machine start", func() {
Expect(startSession).To(Exit(125))
Expect(startSession.errorToString()).To(ContainSubstring("VM already running or starting"))
})
It("start only starts specified machine", func() {
i := initMachine{}
startme := randomString()

View File

@ -1,309 +0,0 @@
//go:build windows
package hyperv
import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"time"
"github.com/containers/libhvee/pkg/hypervctl"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/docker/go-units"
"github.com/sirupsen/logrus"
)
type HyperVVirtualization struct {
machine.Virtualization
}
func VirtualizationProvider() machine.VirtProvider {
return &HyperVVirtualization{
machine.NewVirtualization(define.HyperV, compression.Zip, define.Vhdx, vmtype),
}
}
func (v HyperVVirtualization) CheckExclusiveActiveVM() (bool, string, error) {
vmm := hypervctl.NewVirtualMachineManager()
// Get all the VMs on disk (json files)
onDiskVMs, err := v.loadFromLocalJson()
if err != nil {
return false, "", err
}
for _, onDiskVM := range onDiskVMs {
// lookup if the vm exists in hyperv
exists, vm, err := vmm.GetMachineExists(onDiskVM.Name)
if err != nil {
return false, "", err
}
// hyperv does not know about it, move on
if !exists { // hot path
// TODO should we logrus this to show we found a JSON with no hyperv vm ?
continue
}
if vm.IsStarting() || vm.State() == hypervctl.Enabled {
return true, vm.ElementName, nil
}
}
return false, "", nil
}
func (v HyperVVirtualization) IsValidVMName(name string) (bool, error) {
var found bool
vms, err := v.loadFromLocalJson()
if err != nil {
return false, err
}
for _, vm := range vms {
if vm.Name == name {
found = true
break
}
}
if !found {
return false, nil
}
if _, err := hypervctl.NewVirtualMachineManager().GetMachine(name); err != nil {
return false, err
}
return true, nil
}
func (v HyperVVirtualization) List(opts machine.ListOptions) ([]*machine.ListResponse, error) {
mms, err := v.loadFromLocalJson()
if err != nil {
return nil, err
}
var response []*machine.ListResponse
vmm := hypervctl.NewVirtualMachineManager()
for _, mm := range mms {
vm, err := vmm.GetMachine(mm.Name)
if err != nil {
return nil, err
}
mlr := machine.ListResponse{
Name: mm.Name,
CreatedAt: mm.Created,
LastUp: mm.LastUp,
Running: vm.State() == hypervctl.Enabled,
Starting: mm.isStarting(),
Stream: mm.ImageStream,
VMType: define.HyperVVirt.String(),
CPUs: mm.CPUs,
Memory: mm.Memory * units.MiB,
DiskSize: mm.DiskSize * units.GiB,
Port: mm.Port,
RemoteUsername: mm.RemoteUsername,
IdentityPath: mm.IdentityPath,
}
response = append(response, &mlr)
}
return response, err
}
func (v HyperVVirtualization) LoadVMByName(name string) (machine.VM, error) {
m := &HyperVMachine{Name: name}
return m.loadFromFile()
}
func (v HyperVVirtualization) NewMachine(opts machine.InitOptions) (machine.VM, error) {
m := HyperVMachine{Name: opts.Name}
if len(opts.ImagePath) < 1 {
return nil, errors.New("must define --image-path for hyperv support")
}
if len(opts.USBs) > 0 {
return nil, fmt.Errorf("USB host passthrough is not supported for hyperv machines")
}
m.RemoteUsername = opts.Username
configDir, err := machine.GetConfDir(define.HyperVVirt)
if err != nil {
return nil, err
}
configPath, err := define.NewMachineFile(getVMConfigPath(configDir, opts.Name), nil)
if err != nil {
return nil, err
}
m.ConfigPath = *configPath
if err := ignition.SetIgnitionFile(&m.IgnitionFile, vmtype, m.Name, configDir); err != nil {
return nil, err
}
// Set creation time
m.Created = time.Now()
dataDir, err := machine.GetDataDir(define.HyperVVirt)
if err != nil {
return nil, err
}
// Set the proxy pid file
gvProxyPid, err := define.NewMachineFile(filepath.Join(dataDir, "gvproxy.pid"), nil)
if err != nil {
return nil, err
}
m.GvProxyPid = *gvProxyPid
dl, err := VirtualizationProvider().NewDownload(m.Name)
if err != nil {
return nil, err
}
// Acquire the image
imagePath, imageStream, err := dl.AcquireVMImage(opts.ImagePath)
if err != nil {
return nil, err
}
// assign values to machine
m.ImagePath = *imagePath
m.ImageStream = imageStream.String()
config := hypervctl.HardwareConfig{
CPUs: uint16(opts.CPUS),
DiskPath: imagePath.GetPath(),
DiskSize: opts.DiskSize,
Memory: opts.Memory,
}
// Write the json configuration file which will be loaded by
// LoadByName
b, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
if err := os.WriteFile(m.ConfigPath.GetPath(), b, 0644); err != nil {
return nil, err
}
vmm := hypervctl.NewVirtualMachineManager()
if err := vmm.NewVirtualMachine(opts.Name, &config); err != nil {
return nil, err
}
return v.LoadVMByName(opts.Name)
}
func (v HyperVVirtualization) RemoveAndCleanMachines() error {
// Error handling used here is following what qemu did
var (
prevErr error
)
// The next three info lookups must succeed or we return
mms, err := v.loadFromLocalJson()
if err != nil {
return err
}
configDir, err := machine.GetConfDir(vmtype)
if err != nil {
return err
}
dataDir, err := machine.GetDataDir(vmtype)
if err != nil {
return err
}
vmm := hypervctl.NewVirtualMachineManager()
for _, mm := range mms {
vm, err := vmm.GetMachine(mm.Name)
if err != nil {
prevErr = handlePrevError(err, prevErr)
}
if vm.State() != hypervctl.Disabled {
if err := vm.StopWithForce(); err != nil {
prevErr = handlePrevError(err, prevErr)
}
}
if err := vm.Remove(mm.ImagePath.GetPath()); err != nil {
prevErr = handlePrevError(err, prevErr)
}
if err := mm.ReadyHVSock.Remove(); err != nil {
prevErr = handlePrevError(err, prevErr)
}
if err := mm.NetworkHVSock.Remove(); err != nil {
prevErr = handlePrevError(err, prevErr)
}
}
// Nuke the config and dataDirs
if err := os.RemoveAll(configDir); err != nil {
prevErr = handlePrevError(err, prevErr)
}
if err := os.RemoveAll(dataDir); err != nil {
prevErr = handlePrevError(err, prevErr)
}
return prevErr
}
func (v HyperVVirtualization) VMType() define.VMType {
return vmtype
}
func (v HyperVVirtualization) loadFromLocalJson() ([]*HyperVMachine, error) {
var (
jsonFiles []string
mms []*HyperVMachine
)
configDir, err := machine.GetConfDir(v.VMType())
if err != nil {
return nil, err
}
if err := filepath.WalkDir(configDir, func(input string, d fs.DirEntry, e error) error {
if e != nil {
return e
}
if filepath.Ext(d.Name()) == ".json" {
jsonFiles = append(jsonFiles, input)
}
return nil
}); err != nil {
return nil, err
}
for _, jsonFile := range jsonFiles {
mm := HyperVMachine{}
if err := mm.loadHyperVMachineFromJSON(jsonFile); err != nil {
return nil, err
}
if err != nil {
return nil, err
}
mms = append(mms, &mm)
}
return mms, nil
}
func handlePrevError(e, prevErr error) error {
if prevErr != nil {
logrus.Error(e)
}
return e
}
func stateConversion(s hypervctl.EnabledState) (define.Status, error) {
switch s {
case hypervctl.Enabled:
return define.Running, nil
case hypervctl.Disabled:
return define.Stopped, nil
case hypervctl.Starting:
return define.Starting, nil
}
return define.Unknown, fmt.Errorf("unknown state: %q", s.String())
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,556 @@
//go:build windows
package hyperv
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/Microsoft/go-winio"
"github.com/containers/common/pkg/strongunits"
gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types"
"github.com/containers/libhvee/pkg/hypervctl"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/hyperv/vsock"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/pkg/systemd/parser"
"github.com/sirupsen/logrus"
)
type HyperVStubber struct {
vmconfigs.HyperVConfig
}
func (h HyperVStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConfig, builder *ignition.IgnitionBuilder) error {
var (
err error
)
callbackFuncs := machine.InitCleanup()
defer callbackFuncs.CleanIfErr(&err)
go callbackFuncs.CleanOnSignal()
hwConfig := hypervctl.HardwareConfig{
CPUs: uint16(mc.Resources.CPUs),
DiskPath: mc.ImagePath.GetPath(),
DiskSize: mc.Resources.DiskSize,
Memory: mc.Resources.Memory,
}
networkHVSock, err := vsock.NewHVSockRegistryEntry(mc.Name, vsock.Network)
if err != nil {
return err
}
mc.HyperVHypervisor.NetworkVSock = *networkHVSock
// Add vsock port numbers to mounts
err = createShares(mc)
if err != nil {
return err
}
removeShareCallBack := func() error {
return removeShares(mc)
}
callbackFuncs.Add(removeShareCallBack)
removeRegistrySockets := func() error {
removeNetworkAndReadySocketsFromRegistry(mc)
return nil
}
callbackFuncs.Add(removeRegistrySockets)
netUnitFile, err := createNetworkUnit(mc.HyperVHypervisor.NetworkVSock.Port)
if err != nil {
return err
}
builder.WithUnit(ignition.Unit{
Contents: ignition.StrToPtr(netUnitFile),
Enabled: ignition.BoolToPtr(true),
Name: "vsock-network.service",
})
builder.WithFile(ignition.File{
Node: ignition.Node{
Path: "/etc/NetworkManager/system-connections/vsock0.nmconnection",
},
FileEmbedded1: ignition.FileEmbedded1{
Append: nil,
Contents: ignition.Resource{
Source: ignition.EncodeDataURLPtr(hyperVVsockNMConnection),
},
Mode: ignition.IntToPtr(0600),
},
})
vmm := hypervctl.NewVirtualMachineManager()
err = vmm.NewVirtualMachine(mc.Name, &hwConfig)
if err != nil {
return err
}
vmRemoveCallback := func() error {
vm, err := vmm.GetMachine(mc.Name)
if err != nil {
return err
}
return vm.Remove("")
}
callbackFuncs.Add(vmRemoveCallback)
err = resizeDisk(strongunits.GiB(mc.Resources.DiskSize), mc.ImagePath)
return err
}
func (h HyperVStubber) GetHyperVisorVMs() ([]string, error) {
var (
vmNames []string
)
vmm := hypervctl.NewVirtualMachineManager()
vms, err := vmm.GetAll()
if err != nil {
return nil, err
}
for _, vm := range vms {
vmNames = append(vmNames, vm.Name)
}
return vmNames, nil
}
func (h HyperVStubber) MountType() vmconfigs.VolumeMountType {
return vmconfigs.NineP
}
func (h HyperVStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) error {
return nil
}
func (h HyperVStubber) Remove(mc *vmconfigs.MachineConfig) ([]string, func() error, error) {
mc.Lock()
defer mc.Unlock()
_, vm, err := GetVMFromMC(mc)
if err != nil {
return nil, nil, err
}
rmFunc := func() error {
// Tear down vsocks
removeNetworkAndReadySocketsFromRegistry(mc)
// Remove ignition registry entries - not a fatal error
// for vm removal
// TODO we could improve this by recommending an action be done
if err := removeIgnitionFromRegistry(vm); err != nil {
logrus.Errorf("unable to remove ignition registry entries: %q", err)
}
// disk path removal is done by generic remove
return vm.Remove("")
}
return []string{}, rmFunc, nil
}
func (h HyperVStubber) RemoveAndCleanMachines(_ *define.MachineDirs) error {
return nil
}
func (h HyperVStubber) StartNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error {
cmd.AddEndpoint(fmt.Sprintf("vsock://%s", mc.HyperVHypervisor.NetworkVSock.KeyName))
return nil
}
func (h HyperVStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) {
var (
err error
)
_, vm, err := GetVMFromMC(mc)
if err != nil {
return nil, nil, err
}
callbackFuncs := machine.InitCleanup()
defer callbackFuncs.CleanIfErr(&err)
go callbackFuncs.CleanOnSignal()
firstBoot, err := mc.IsFirstBoot()
if err != nil {
return nil, nil, err
}
if firstBoot {
// Add ignition entries to windows registry
// for first boot only
if err := readAndSplitIgnition(mc, vm); err != nil {
return nil, nil, err
}
// this is added because if the machine does not start
// properly on first boot, the next boot will be considered
// the first boot again and the addition of the ignition
// entries might fail?
//
// the downside is that if the start fails and then a rm
// is run, it will puke error messages about the ignition.
//
// TODO detect if ignition was run from a failed boot earlier
// and skip. Maybe this could be done with checking a k/v
// pair
rmIgnCallbackFunc := func() error {
return removeIgnitionFromRegistry(vm)
}
callbackFuncs.Add(rmIgnCallbackFunc)
}
err = vm.Start()
if err != nil {
return nil, nil, err
}
startCallback := func() error {
return vm.Stop()
}
callbackFuncs.Add(startCallback)
return nil, mc.HyperVHypervisor.ReadyVsock.Listen, err
}
// State returns the machine state as a define.Status. For hyperv, state differs from other providers
// because it is determined by the VM itself. Normally this can be done with vm.State() and a
// conversion, but doing it here requires a little more interaction with the hypervisor
func (h HyperVStubber) State(mc *vmconfigs.MachineConfig, bypass bool) (define.Status, error) {
_, vm, err := GetVMFromMC(mc)
if err != nil {
return define.Unknown, err
}
return stateConversion(vm.State())
}
func (h HyperVStubber) StopVM(mc *vmconfigs.MachineConfig, hardStop bool) error {
mc.Lock()
defer mc.Unlock()
vmm := hypervctl.NewVirtualMachineManager()
vm, err := vmm.GetMachine(mc.Name)
if err != nil {
return fmt.Errorf("getting virtual machine: %w", err)
}
vmState := vm.State()
if vm.State() == hypervctl.Disabled {
return nil
}
if vmState != hypervctl.Enabled { // more states could be provided as well
return hypervctl.ErrMachineStateInvalid
}
if hardStop {
return vm.StopWithForce()
}
return vm.Stop()
}
// TODO should this be plumbed higher into the code stack?
func (h HyperVStubber) StopHostNetworking(mc *vmconfigs.MachineConfig, vmType define.VMType) error {
err := machine.StopWinProxy(mc.Name, vmType)
// in podman 4, this was a "soft" error; keeping behavior as such
if err != nil {
fmt.Fprintf(os.Stderr, "Could not stop API forwarding service (win-sshproxy.exe): %s\n", err.Error())
}
return nil
}
func (h HyperVStubber) VMType() define.VMType {
return define.HyperVVirt
}
func GetVMFromMC(mc *vmconfigs.MachineConfig) (*hypervctl.VirtualMachineManager, *hypervctl.VirtualMachine, error) {
vmm := hypervctl.NewVirtualMachineManager()
vm, err := vmm.GetMachine(mc.Name)
return vmm, vm, err
}
func stateConversion(s hypervctl.EnabledState) (define.Status, error) {
switch s {
case hypervctl.Enabled:
return define.Running, nil
case hypervctl.Disabled:
return define.Stopped, nil
case hypervctl.Starting:
return define.Starting, nil
}
return define.Unknown, fmt.Errorf("unknown state: %q", s.String())
}
func (h HyperVStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, cpus, memory *uint64, newDiskSize *strongunits.GiB, newRootful *bool) error {
var (
cpuChanged, memoryChanged bool
)
mc.Lock()
defer mc.Unlock()
_, vm, err := GetVMFromMC(mc)
if err != nil {
return err
}
// TODO let's move this up into Set as a "rule" for all machines
if vm.State() != hypervctl.Disabled {
return errors.New("unable to change settings unless vm is stopped")
}
if newRootful != nil && mc.HostUser.Rootful != *newRootful {
if err := mc.SetRootful(*newRootful); err != nil {
return err
}
}
if newDiskSize != nil {
if err := resizeDisk(*newDiskSize, mc.ImagePath); err != nil {
return err
}
}
if cpus != nil {
cpuChanged = true
}
if memory != nil {
memoryChanged = true
}
if cpuChanged || memoryChanged {
err := vm.UpdateProcessorMemSettings(func(ps *hypervctl.ProcessorSettings) {
if cpuChanged {
ps.VirtualQuantity = *cpus
}
}, func(ms *hypervctl.MemorySettings) {
if memoryChanged {
ms.DynamicMemoryEnabled = false
ms.VirtualQuantity = *memory
ms.Limit = *memory
ms.Reservation = *memory
}
})
if err != nil {
return fmt.Errorf("setting CPU and Memory for VM: %w", err)
}
}
return nil
}
func (h HyperVStubber) PrepareIgnition(mc *vmconfigs.MachineConfig, ignBuilder *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error) {
// HyperV is different because it has to know some ignition details before creating the VM. It cannot
// simply be derived. So we create the HyperVConfig here.
mc.HyperVHypervisor = new(vmconfigs.HyperVConfig)
var ignOpts ignition.ReadyUnitOpts
readySock, err := vsock.NewHVSockRegistryEntry(mc.Name, vsock.Events)
if err != nil {
return nil, err
}
// TODO Stopped here ... fails because mc.Hypervisor is nil ... this can be nil-checked prior and created;
// however, the same will have to be done in create
mc.HyperVHypervisor.ReadyVsock = *readySock
ignOpts.Port = readySock.Port
return &ignOpts, nil
}
func (h HyperVStubber) PostStartNetworking(mc *vmconfigs.MachineConfig) error {
var (
err error
executable string
)
callbackFuncs := machine.InitCleanup()
defer callbackFuncs.CleanIfErr(&err)
go callbackFuncs.CleanOnSignal()
winProxyOpts := machine.WinProxyOpts{
Name: mc.Name,
IdentityPath: mc.SSH.IdentityPath,
Port: mc.SSH.Port,
RemoteUsername: mc.SSH.RemoteUsername,
Rootful: mc.HostUser.Rootful,
VMType: h.VMType(),
}
// TODO Should this process be fatal on error? Currently, no error is
// returned, but an error can occur in the func itself
// TODO we do not currently pass "noinfo" (quiet) into the StartVM
// func so this is hard set to false
machine.LaunchWinProxy(winProxyOpts, false)
winProxyCallbackFunc := func() error {
return machine.StopWinProxy(mc.Name, h.VMType())
}
callbackFuncs.Add(winProxyCallbackFunc)
if len(mc.Mounts) != 0 {
var (
dirs *define.MachineDirs
gvproxyPID int
)
dirs, err = machine.GetMachineDirs(h.VMType())
if err != nil {
return err
}
// GvProxy PID file path is now derived
gvproxyPIDFile, err := dirs.RuntimeDir.AppendToNewVMFile("gvproxy.pid", nil)
if err != nil {
return err
}
gvproxyPID, err = gvproxyPIDFile.ReadPIDFrom()
if err != nil {
return err
}
executable, err = os.Executable()
if err != nil {
return err
}
// Start the 9p server in the background
p9ServerArgs := []string{}
if logrus.IsLevelEnabled(logrus.DebugLevel) {
p9ServerArgs = append(p9ServerArgs, "--log-level=debug")
}
p9ServerArgs = append(p9ServerArgs, "machine", "server9p")
for _, mount := range mc.Mounts {
if mount.VSockNumber == nil {
return fmt.Errorf("mount %s has not vsock port defined", mount.Source)
}
p9ServerArgs = append(p9ServerArgs, "--serve", fmt.Sprintf("%s:%s", mount.Source, winio.VsockServiceID(uint32(*mount.VSockNumber)).String()))
}
p9ServerArgs = append(p9ServerArgs, fmt.Sprintf("%d", gvproxyPID))
logrus.Debugf("Going to start 9p server using command: %s %v", executable, p9ServerArgs)
fsCmd := exec.Command(executable, p9ServerArgs...)
if logrus.IsLevelEnabled(logrus.DebugLevel) {
err = logCommandToFile(fsCmd, "podman-machine-server9.log")
if err != nil {
return err
}
}
err = fsCmd.Start()
if err == nil {
logrus.Infof("Started podman 9p server as PID %d", fsCmd.Process.Pid)
}
// Note: No callback is needed to stop the 9p server, because it will stop when
// gvproxy stops
// Finalize starting shares after we are confident gvproxy is still alive.
err = startShares(mc)
}
return err
}
func resizeDisk(newSize strongunits.GiB, imagePath *define.VMFile) error {
resize := exec.Command("powershell", []string{"-command", fmt.Sprintf("Resize-VHD %s %d", imagePath.GetPath(), newSize.ToBytes())}...)
logrus.Debug(resize.Args)
resize.Stdout = os.Stdout
resize.Stderr = os.Stderr
if err := resize.Run(); err != nil {
return fmt.Errorf("resizing image: %q", err)
}
return nil
}
// removeNetworkAndReadySocketsFromRegistry removes the Network and Ready sockets
// from the Windows Registry
func removeNetworkAndReadySocketsFromRegistry(mc *vmconfigs.MachineConfig) {
// Remove the HVSOCK for networking
if err := mc.HyperVHypervisor.NetworkVSock.Remove(); err != nil {
logrus.Errorf("unable to remove registry entry for %s: %q", mc.HyperVHypervisor.NetworkVSock.KeyName, err)
}
// Remove the HVSOCK for events
if err := mc.HyperVHypervisor.ReadyVsock.Remove(); err != nil {
logrus.Errorf("unable to remove registry entry for %s: %q", mc.HyperVHypervisor.ReadyVsock.KeyName, err)
}
}
// readAndSplitIgnition reads the ignition file and splits it into key:value pairs
func readAndSplitIgnition(mc *vmconfigs.MachineConfig, vm *hypervctl.VirtualMachine) error {
ignFile, err := mc.IgnitionFile()
if err != nil {
return err
}
ign, err := ignFile.Read()
if err != nil {
return err
}
reader := bytes.NewReader(ign)
return vm.SplitAndAddIgnition("ignition.config.", reader)
}
func removeIgnitionFromRegistry(vm *hypervctl.VirtualMachine) error {
pairs, err := vm.GetKeyValuePairs()
if err != nil {
return err
}
for key := range pairs {
if err := vm.RemoveKeyValuePair(key); err != nil {
return err
}
}
return nil
}
func logCommandToFile(c *exec.Cmd, filename string) error {
dir, err := machine.GetDataDir(define.HyperVVirt)
if err != nil {
return fmt.Errorf("obtain machine dir: %w", err)
}
path := filepath.Join(dir, filename)
logrus.Infof("Going to log to %s", path)
log, err := os.Create(path)
if err != nil {
return fmt.Errorf("create log file: %w", err)
}
defer log.Close()
c.Stdout = log
c.Stderr = log
return nil
}
const hyperVVsockNMConnection = `
[connection]
id=vsock0
type=tun
interface-name=vsock0
[tun]
mode=2
[802-3-ethernet]
cloned-mac-address=5A:94:EF:E4:0C:EE
[ipv4]
method=auto
[proxy]
`
func createNetworkUnit(netPort uint64) (string, error) {
netUnit := parser.NewUnitFile()
netUnit.Add("Unit", "Description", "vsock_network")
netUnit.Add("Unit", "After", "NetworkManager.service")
netUnit.Add("Service", "ExecStart", fmt.Sprintf("/usr/libexec/podman/gvforwarder -preexisting -iface vsock0 -url vsock://2:%d/connect", netPort))
netUnit.Add("Service", "ExecStartPost", "/usr/bin/nmcli c up vsock0")
netUnit.Add("Install", "WantedBy", "multi-user.target")
return netUnit.ToString()
}
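
With a hypothetical vsock port of 1025, the unit produced by createNetworkUnit and injected through ignition should render roughly as follows (assuming the unit serializer emits sections in the order they were added):

[Unit]
Description=vsock_network
After=NetworkManager.service

[Service]
ExecStart=/usr/libexec/podman/gvforwarder -preexisting -iface vsock0 -url vsock://2:1025/connect
ExecStartPost=/usr/bin/nmcli c up vsock0

[Install]
WantedBy=multi-user.target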

View File

@ -0,0 +1,70 @@
//go:build windows
package hyperv
import (
"errors"
"fmt"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/hyperv/vsock"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
func removeShares(mc *vmconfigs.MachineConfig) error {
var removalErr error
for _, mount := range mc.Mounts {
if mount.VSockNumber == nil {
// nothing to do if the vsock number was never defined
continue
}
vsockReg, err := vsock.LoadHVSockRegistryEntry(*mount.VSockNumber)
if err != nil {
logrus.Debugf("Vsock %d for mountpoint %s does not have a valid registry entry, skipping removal", *mount.VSockNumber, mount.Target)
continue
}
if err := vsockReg.Remove(); err != nil {
if removalErr != nil {
logrus.Errorf("Error removing vsock: %w", removalErr)
}
removalErr = fmt.Errorf("removing vsock %d for mountpoint %s: %w", *mount.VSockNumber, mount.Target, err)
}
}
return removalErr
}
func startShares(mc *vmconfigs.MachineConfig) error {
for _, mount := range mc.Mounts {
args := []string{"-q", "--", "sudo", "podman"}
if logrus.IsLevelEnabled(logrus.DebugLevel) {
args = append(args, "--log-level=debug")
}
// just being protective here; in a perfect world, this cannot happen
if mount.VSockNumber == nil {
return errors.New("cannot start 9p shares with undefined vsock number")
}
args = append(args, "machine", "client9p", fmt.Sprintf("%d", *mount.VSockNumber), mount.Target)
if err := machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args); err != nil {
return err
}
}
return nil
}
func createShares(mc *vmconfigs.MachineConfig) (err error) {
for _, mount := range mc.Mounts {
testVsock, err := vsock.NewHVSockRegistryEntry(mc.Name, vsock.Fileserver)
if err != nil {
return err
}
mount.VSockNumber = &testVsock.Port
logrus.Debugf("Going to share directory %s via 9p on vsock %d", mount.Source, testVsock.Port)
}
return nil
}
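As a hedged ordering sketch (assumed, not part of the PR), the three helpers above are expected to be used roughly as follows: createShares at init to reserve vsock registry entries, startShares after boot to launch the 9p clients over SSH, and removeShares on machine removal. The helper name below is illustrative and relies on the file's existing imports.

// Hypothetical lifecycle illustration only; the real call sites are spread
// across the HyperV stubber and the shim.
func shareLifecycle(mc *vmconfigs.MachineConfig) error {
	if err := createShares(mc); err != nil { // reserve a vsock registry entry per mount
		return err
	}
	if err := startShares(mc); err != nil { // start 9p clients inside the guest via SSH
		return err
	}
	return removeShares(mc) // tear the registry entries back down
}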

View File

@ -8,9 +8,8 @@ import (
"net"
"strings"
"github.com/containers/podman/v4/pkg/machine/sockets"
"github.com/Microsoft/go-winio"
"github.com/containers/podman/v4/pkg/machine/sockets"
"github.com/containers/podman/v4/utils"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows/registry"

View File

@ -834,6 +834,7 @@ func (i *IgnitionBuilder) BuildWithIgnitionFile(ignPath string) error {
// Build writes the internal `DynamicIgnition` config to its write path
func (i *IgnitionBuilder) Build() error {
logrus.Debugf("writing ignition file to %q", i.dynamicIgnition.WritePath)
return i.dynamicIgnition.Write()
}

View File

@ -13,7 +13,7 @@ type ReadyUnitOpts struct {
Port uint64
}
// CreateReadyUnitFile makes a the ready unit to report back to the host that the system is running
// CreateReadyUnitFile makes the ready unit to report back to the host that the system is running
func CreateReadyUnitFile(provider define.VMType, opts *ReadyUnitOpts) (string, error) {
readyUnit := DefaultReadyUnitFile()
switch provider {

17
pkg/machine/lock/lock.go Normal file
View File

@ -0,0 +1,17 @@
package lock
import (
"fmt"
"path/filepath"
"github.com/containers/storage/pkg/lockfile"
)
func GetMachineLock(name string, machineConfigDir string) (*lockfile.LockFile, error) {
lockPath := filepath.Join(machineConfigDir, name+".lock")
lock, err := lockfile.GetLockFile(lockPath)
if err != nil {
return nil, fmt.Errorf("creating lockfile for VM: %w", err)
}
return lock, nil
}
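A short usage sketch (not in the PR), assuming the Lock/Unlock methods provided by containers/storage's lockfile package: callers would hold the per-machine lock around configuration reads and writes.

// Hypothetical caller in the same package.
func withMachineLock(name, machineConfigDir string, fn func() error) error {
	l, err := GetMachineLock(name, machineConfigDir)
	if err != nil {
		return err
	}
	l.Lock()
	defer l.Unlock()
	return fn()
}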

View File

@ -5,10 +5,9 @@ package machine
import (
"encoding/json"
"fmt"
"net/url"
"os"
"strconv"
"github.com/containers/podman/v4/pkg/machine/connection"
"github.com/containers/storage/pkg/ioutils"
)
@ -30,36 +29,9 @@ func GetDevNullFiles() (*os.File, *os.File, error) {
return dnr, dnw, nil
}
// AddSSHConnectionsToPodmanSocket adds SSH connections to the podman socket if
// no ignition path is provided
func AddSSHConnectionsToPodmanSocket(uid, port int, identityPath, name, remoteUsername string, opts InitOptions) error {
if len(opts.IgnitionPath) > 0 {
fmt.Println("An ignition path was provided. No SSH connection was added to Podman")
return nil
}
uri := SSHRemoteConnection.MakeSSHURL(LocalhostIP, fmt.Sprintf("/run/user/%d/podman/podman.sock", uid), strconv.Itoa(port), remoteUsername)
uriRoot := SSHRemoteConnection.MakeSSHURL(LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(port), "root")
uris := []url.URL{uri, uriRoot}
names := []string{name, name + "-root"}
// The first connection defined when connections is empty will become the default
// regardless of IsDefault, so order according to rootful
if opts.Rootful {
uris[0], names[0], uris[1], names[1] = uris[1], names[1], uris[0], names[0]
}
for i := 0; i < 2; i++ {
if err := AddConnection(&uris[i], names[i], identityPath, opts.IsDefault && i == 0); err != nil {
return err
}
}
return nil
}
// WaitAPIAndPrintInfo prints info about the machine and does a ping test on the
// API socket
func WaitAPIAndPrintInfo(forwardState APIForwardingState, name, helper, forwardSock string, noInfo, isIncompatible, rootful bool) {
func WaitAPIAndPrintInfo(forwardState APIForwardingState, name, helper, forwardSock string, noInfo, rootful bool) {
suffix := ""
var fmtString string
@ -67,31 +39,6 @@ func WaitAPIAndPrintInfo(forwardState APIForwardingState, name, helper, forwardS
suffix = " " + name
}
if isIncompatible {
fmtString = `
!!! ACTION REQUIRED: INCOMPATIBLE MACHINE !!!
This machine was created by an older podman release that is incompatible
with this release of podman. It has been started in a limited operational
mode to allow you to copy any necessary files before recreating it. This
can be accomplished with the following commands:
# Login and copy desired files (Optional)
# podman machine ssh%[1]s tar cvPf - /path/to/files > backup.tar
# Recreate machine (DESTRUCTIVE!)
podman machine stop%[1]s
podman machine rm -f%[1]s
podman machine init --now%[1]s
# Copy back files (Optional)
# cat backup.tar | podman machine ssh%[1]s tar xvPf -
`
fmt.Fprintf(os.Stderr, fmtString, suffix)
}
if forwardState == NoForwarding {
return
}
@ -158,7 +105,7 @@ following command in your terminal session:
// SetRootful modifies the machine's default connection to be either rootful or
// rootless
func SetRootful(rootful bool, name, rootfulName string) error {
return UpdateConnectionIfDefault(rootful, name, rootfulName)
return connection.UpdateConnectionIfDefault(rootful, name, rootfulName)
}
// WriteConfig writes the machine's JSON config file

View File

@ -133,6 +133,7 @@ func launchWinProxy(opts WinProxyOpts) (bool, string, error) {
}
cmd := exec.Command(command, args...)
logrus.Debugf("winssh command: %s %v", command, args)
if err := cmd.Start(); err != nil {
return globalName, "", err
}

View File

@ -28,10 +28,7 @@ type OSVersion struct {
}
type Disker interface {
Pull() error
Decompress(compressedFile *define.VMFile) (*define.VMFile, error)
DiskEndpoint() string
Unpack() (*define.VMFile, error)
Get() error
}
type OCIOpts struct {

View File

@ -5,7 +5,6 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/pkg/machine/compression"
@ -25,15 +24,16 @@ type Versioned struct {
machineVersion *OSVersion
vmName string
vmType string
finalPath *define.VMFile
}
func NewVersioned(ctx context.Context, machineImageDir, vmName string, vmType string) (*Versioned, error) {
imageCacheDir := filepath.Join(machineImageDir, "cache")
func NewVersioned(ctx context.Context, machineImageDir *define.VMFile, vmName string, vmType string, finalPath *define.VMFile) (*Versioned, error) {
imageCacheDir := filepath.Join(machineImageDir.GetPath(), "cache")
if err := os.MkdirAll(imageCacheDir, 0777); err != nil {
return nil, err
}
o := getVersion()
return &Versioned{ctx: ctx, cacheDir: imageCacheDir, machineImageDir: machineImageDir, machineVersion: o, vmName: vmName, vmType: vmType}, nil
return &Versioned{ctx: ctx, cacheDir: imageCacheDir, machineImageDir: machineImageDir.GetPath(), machineVersion: o, vmName: vmName, vmType: vmType, finalPath: finalPath}, nil
}
func (d *Versioned) LocalBlob() *types.BlobInfo {
@ -136,14 +136,8 @@ func (d *Versioned) Unpack() (*define.VMFile, error) {
return unpackedFile, nil
}
func (d *Versioned) Decompress(compressedFile *define.VMFile) (*define.VMFile, error) {
imageCompression := compression.KindFromFile(d.imageName)
strippedImageName := strings.TrimSuffix(d.imageName, fmt.Sprintf(".%s", imageCompression.String()))
finalName := finalFQImagePathName(d.vmName, strippedImageName)
if err := compression.Decompress(compressedFile, finalName); err != nil {
return nil, err
}
return define.NewMachineFile(finalName, nil)
func (d *Versioned) Decompress(compressedFile *define.VMFile) error {
return compression.Decompress(compressedFile, d.finalPath.GetPath())
}
func (d *Versioned) localOCIDiskImageDir(localBlob *types.BlobInfo) string {
@ -154,3 +148,22 @@ func (d *Versioned) localOCIDirExists() bool {
_, indexErr := os.Stat(filepath.Join(d.versionedOCICacheDir(), "index.json"))
return indexErr == nil
}
func (d *Versioned) Get() error {
if err := d.Pull(); err != nil {
return err
}
unpacked, err := d.Unpack()
if err != nil {
return err
}
defer func() {
logrus.Debugf("cleaning up %q", unpacked.GetPath())
if err := unpacked.Delete(); err != nil {
logrus.Errorf("unable to delete local compressed file %q:%v", unpacked.GetPath(), err)
}
}()
return d.Decompress(unpacked)
}

View File

@ -1,8 +1,6 @@
package qemu
package machine
import (
"os"
)
import "os"
func getRuntimeDir() (string, error) {
tmpDir, ok := os.LookupEnv("TMPDIR")
@ -11,7 +9,3 @@ func getRuntimeDir() (string, error) {
}
return tmpDir, nil
}
func useNetworkRecover() bool {
return true
}

View File

@ -0,0 +1,11 @@
package machine
import "os"
func getRuntimeDir() (string, error) {
tmpDir, ok := os.LookupEnv("TMPDIR")
if !ok {
tmpDir = "/tmp"
}
return tmpDir, nil
}

View File

@ -1,4 +1,4 @@
package qemu
package machine
import (
"github.com/containers/podman/v4/pkg/rootless"
@ -11,7 +11,3 @@ func getRuntimeDir() (string, error) {
}
return util.GetRootlessRuntimeDir()
}
func useNetworkRecover() bool {
return false
}

View File

@ -0,0 +1,11 @@
package machine
import "os"
func getRuntimeDir() (string, error) {
tmpDir, ok := os.LookupEnv("TEMP")
if !ok {
tmpDir = os.Getenv("LOCALAPPDATA") + "\\Temp"
}
return tmpDir, nil
}

View File

@ -6,31 +6,37 @@ import (
"fmt"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/shim"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)
// MachineOS manages machine OS's from outside the machine.
type MachineOS struct {
Args []string
VM machine.VM
VMName string
Restart bool
Args []string
VM *vmconfigs.MachineConfig
Provider vmconfigs.VMProvider
VMName string
Restart bool
}
// Apply applies the image by sshing into the machine and running apply from inside the VM.
func (m *MachineOS) Apply(image string, opts ApplyOptions) error {
sshOpts := machine.SSHOptions{
Args: []string{"podman", "machine", "os", "apply", image},
args := []string{"podman", "machine", "os", "apply", image}
if err := machine.CommonSSH(m.VM.SSH.RemoteUsername, m.VM.SSH.IdentityPath, m.VMName, m.VM.SSH.Port, args); err != nil {
return err
}
if err := m.VM.SSH(m.VMName, sshOpts); err != nil {
dirs, err := machine.GetMachineDirs(m.Provider.VMType())
if err != nil {
return err
}
if m.Restart {
if err := m.VM.Stop(m.VMName, machine.StopOptions{}); err != nil {
if err := shim.Stop(m.VM, m.Provider, dirs, false); err != nil {
return err
}
if err := m.VM.Start(m.VMName, machine.StartOptions{NoInfo: true}); err != nil {
if err := shim.Start(m.VM, m.Provider, dirs, machine.StartOptions{NoInfo: true}); err != nil {
return err
}
fmt.Printf("Machine %q restarted successfully\n", m.VMName)

View File

@ -7,13 +7,13 @@ import (
"os"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/qemu"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
func Get() (machine.VirtProvider, error) {
func Get() (vmconfigs.VMProvider, error) {
cfg, err := config.Default()
if err != nil {
return nil, err
@ -30,7 +30,7 @@ func Get() (machine.VirtProvider, error) {
logrus.Debugf("Using Podman machine with `%s` virtualization provider", resolvedVMType.String())
switch resolvedVMType {
case define.QemuVirt:
return qemu.VirtualizationProvider(), nil
return new(qemu.QEMUStubber), nil
default:
return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String())
}

View File

@ -5,14 +5,13 @@ import (
"os"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/applehv"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/qemu"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
func Get() (machine.VirtProvider, error) {
func Get() (vmconfigs.VMProvider, error) {
cfg, err := config.Default()
if err != nil {
return nil, err
@ -28,10 +27,8 @@ func Get() (machine.VirtProvider, error) {
logrus.Debugf("Using Podman machine with `%s` virtualization provider", resolvedVMType.String())
switch resolvedVMType {
case define.QemuVirt:
return qemu.VirtualizationProvider(), nil
case define.AppleHvVirt:
return applehv.VirtualizationProvider(), nil
return new(applehv.AppleHVStubber), nil
default:
return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String())
}

View File

@ -2,17 +2,16 @@ package provider
import (
"fmt"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"os"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/hyperv"
"github.com/containers/podman/v4/pkg/machine/wsl"
"github.com/sirupsen/logrus"
)
func Get() (machine.VirtProvider, error) {
func Get() (vmconfigs.VMProvider, error) {
cfg, err := config.Default()
if err != nil {
return nil, err
@ -28,10 +27,11 @@ func Get() (machine.VirtProvider, error) {
logrus.Debugf("Using Podman machine with `%s` virtualization provider", resolvedVMType.String())
switch resolvedVMType {
case define.WSLVirt:
return wsl.VirtualizationProvider(), nil
// TODO re-enable this with WSL
//case define.WSLVirt:
// return wsl.VirtualizationProvider(), nil
case define.HyperVVirt:
return hyperv.VirtualizationProvider(), nil
return new(hyperv.HyperVStubber), nil
default:
return nil, fmt.Errorf("unsupported virtualization provider: `%s`", resolvedVMType.String())
}

View File

@ -3,7 +3,6 @@
package machine
import (
"context"
"errors"
"fmt"
"io"
@ -219,7 +218,7 @@ func (dl Download) AcquireAlternateImage(inputPath string) (*define.VMFile, erro
return imagePath, nil
}
func isOci(input string) (bool, *ocipull.OCIKind, error) {
func isOci(input string) (bool, *ocipull.OCIKind, error) { //nolint:unused
inputURL, err := url2.Parse(input)
if err != nil {
return false, nil, err
@ -233,60 +232,60 @@ func isOci(input string) (bool, *ocipull.OCIKind, error) {
return false, nil, nil
}
func Pull(input, machineName string, vp VirtProvider) (*define.VMFile, FCOSStream, error) {
var (
disk ocipull.Disker
)
ociBased, ociScheme, err := isOci(input)
if err != nil {
return nil, 0, err
}
if !ociBased {
// Business as usual
dl, err := vp.NewDownload(machineName)
if err != nil {
return nil, 0, err
}
return dl.AcquireVMImage(input)
}
oopts := ocipull.OCIOpts{
Scheme: ociScheme,
}
dataDir, err := GetDataDir(vp.VMType())
if err != nil {
return nil, 0, err
}
if ociScheme.IsOCIDir() {
strippedOCIDir := ocipull.StripOCIReference(input)
oopts.Dir = &strippedOCIDir
disk = ocipull.NewOCIDir(context.Background(), input, dataDir, machineName)
} else {
// a use of a containers image type here might be
// tighter
strippedInput := strings.TrimPrefix(input, "docker://")
// this is the next piece of work
if len(strippedInput) > 0 {
return nil, 0, errors.New("image names are not supported yet")
}
disk, err = ocipull.NewVersioned(context.Background(), dataDir, machineName, vp.VMType().String())
if err != nil {
return nil, 0, err
}
}
if err := disk.Pull(); err != nil {
return nil, 0, err
}
unpacked, err := disk.Unpack()
if err != nil {
return nil, 0, err
}
defer func() {
logrus.Debugf("cleaning up %q", unpacked.GetPath())
if err := unpacked.Delete(); err != nil {
logrus.Errorf("unable to delete local compressed file %q:%v", unpacked.GetPath(), err)
}
}()
imagePath, err := disk.Decompress(unpacked)
return imagePath, UnknownStream, err
}
// func Pull(input, machineName string, vp VirtProvider) (*define.VMFile, FCOSStream, error) {
// var (
// disk ocipull.Disker
// )
//
// ociBased, ociScheme, err := isOci(input)
// if err != nil {
// return nil, 0, err
// }
// if !ociBased {
// // Business as usual
// dl, err := vp.NewDownload(machineName)
// if err != nil {
// return nil, 0, err
// }
// return dl.AcquireVMImage(input)
// }
// oopts := ocipull.OCIOpts{
// Scheme: ociScheme,
// }
// dataDir, err := GetDataDir(vp.VMType())
// if err != nil {
// return nil, 0, err
// }
// if ociScheme.IsOCIDir() {
// strippedOCIDir := ocipull.StripOCIReference(input)
// oopts.Dir = &strippedOCIDir
// disk = ocipull.NewOCIDir(context.Background(), input, dataDir, machineName)
// } else {
// // a use of a containers image type here might be
// // tighter
// strippedInput := strings.TrimPrefix(input, "docker://")
// // this is the next piece of work
// if len(strippedInput) > 0 {
// return nil, 0, errors.New("image names are not supported yet")
// }
// disk, err = ocipull.NewVersioned(context.Background(), dataDir, machineName, vp.VMType().String())
// if err != nil {
// return nil, 0, err
// }
// }
// if err := disk.Pull(); err != nil {
// return nil, 0, err
// }
// unpacked, err := disk.Unpack()
// if err != nil {
// return nil, 0, err
// }
// defer func() {
// logrus.Debugf("cleaning up %q", unpacked.GetPath())
// if err := unpacked.Delete(); err != nil {
// logrus.Errorf("unable to delete local compressed file %q:%v", unpacked.GetPath(), err)
// }
// }()
// imagePath, err := disk.Decompress(unpacked)
// return imagePath, UnknownStream, err
//}

View File

@ -1,19 +1,23 @@
package command
import (
"encoding/base64"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/containers/common/libnetwork/etchosts"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine/define"
)
// defaultQMPTimeout is the timeout duration for the
// qmp monitor interactions.
var (
defaultQMPTimeout = 2 * time.Second
)
// QemuCmd is an alias around a string slice to prevent the need to migrate the
// MachineVM struct due to changes
type QemuCmd []string
@ -104,7 +108,7 @@ func (q *QemuCmd) SetDisplay(display string) {
// SetPropagatedHostEnvs adds options that propagate SSL and proxy settings
func (q *QemuCmd) SetPropagatedHostEnvs() {
*q = propagateHostEnv(*q)
*q = PropagateHostEnv(*q)
}
func (q *QemuCmd) Build() []string {
@ -181,51 +185,6 @@ func ParseUSBs(usbs []string) ([]USBConfig, error) {
return configs, nil
}
func GetProxyVariables() map[string]string {
proxyOpts := make(map[string]string)
for _, variable := range config.ProxyEnv {
if value, ok := os.LookupEnv(variable); ok {
if value == "" {
continue
}
v := strings.ReplaceAll(value, "127.0.0.1", etchosts.HostContainersInternal)
v = strings.ReplaceAll(v, "localhost", etchosts.HostContainersInternal)
proxyOpts[variable] = v
}
}
return proxyOpts
}
// propagateHostEnv is here for providing the ability to propagate
// proxy and SSL settings (e.g. HTTP_PROXY and others) on a start
// and avoid a need of re-creating/re-initiating a VM
func propagateHostEnv(cmdLine QemuCmd) QemuCmd {
varsToPropagate := make([]string, 0)
for k, v := range GetProxyVariables() {
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", k, v))
}
if sslCertFile, ok := os.LookupEnv("SSL_CERT_FILE"); ok {
pathInVM := filepath.Join(define.UserCertsTargetPath, filepath.Base(sslCertFile))
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_FILE", pathInVM))
}
if _, ok := os.LookupEnv("SSL_CERT_DIR"); ok {
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_DIR", define.UserCertsTargetPath))
}
if len(varsToPropagate) > 0 {
prefix := "name=opt/com.coreos/environment,string="
envVarsJoined := strings.Join(varsToPropagate, "|")
fwCfgArg := prefix + base64.StdEncoding.EncodeToString([]byte(envVarsJoined))
return append(cmdLine, "-fw_cfg", fwCfgArg)
}
return cmdLine
}
type Monitor struct {
// Address portion of the qmp monitor (/tmp/tmp.sock)
Address define.VMFile
@ -234,3 +193,22 @@ type Monitor struct {
// Timeout in seconds for qmp monitor transactions
Timeout time.Duration
}
// NewQMPMonitor creates the monitor subsection of our vm
func NewQMPMonitor(name string, machineRuntimeDir *define.VMFile) (Monitor, error) {
if _, err := os.Stat(machineRuntimeDir.GetPath()); errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(machineRuntimeDir.GetPath(), 0755); err != nil {
return Monitor{}, err
}
}
address, err := machineRuntimeDir.AppendToNewVMFile("qmp_"+name+".sock", nil)
if err != nil {
return Monitor{}, err
}
monitor := Monitor{
Network: "unix",
Address: *address,
Timeout: defaultQMPTimeout,
}
return monitor, nil
}

View File

@ -62,7 +62,7 @@ func TestPropagateHostEnv(t *testing.T) {
t.Setenv(key, item.value)
}
cmdLine := propagateHostEnv(make([]string, 0))
cmdLine := PropagateHostEnv(make([]string, 0))
assert.Len(t, cmdLine, 2)
assert.Equal(t, "-fw_cfg", cmdLine[0])

View File

@ -0,0 +1,58 @@
package command
import (
"encoding/base64"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/common/libnetwork/etchosts"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine/define"
)
func GetProxyVariables() map[string]string {
proxyOpts := make(map[string]string)
for _, variable := range config.ProxyEnv {
if value, ok := os.LookupEnv(variable); ok {
if value == "" {
continue
}
v := strings.ReplaceAll(value, "127.0.0.1", etchosts.HostContainersInternal)
v = strings.ReplaceAll(v, "localhost", etchosts.HostContainersInternal)
proxyOpts[variable] = v
}
}
return proxyOpts
}
// PropagateHostEnv provides the ability to propagate proxy and SSL
// settings (e.g. HTTP_PROXY and others) at start time and avoid the need
// to re-create or re-init a VM
func PropagateHostEnv(cmdLine QemuCmd) QemuCmd {
varsToPropagate := make([]string, 0)
for k, v := range GetProxyVariables() {
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", k, v))
}
if sslCertFile, ok := os.LookupEnv("SSL_CERT_FILE"); ok {
pathInVM := filepath.Join(define.UserCertsTargetPath, filepath.Base(sslCertFile))
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_FILE", pathInVM))
}
if _, ok := os.LookupEnv("SSL_CERT_DIR"); ok {
varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_DIR", define.UserCertsTargetPath))
}
if len(varsToPropagate) > 0 {
prefix := "name=opt/com.coreos/environment,string="
envVarsJoined := strings.Join(varsToPropagate, "|")
fwCfgArg := prefix + base64.StdEncoding.EncodeToString([]byte(envVarsJoined))
return append(cmdLine, "-fw_cfg", fwCfgArg)
}
return cmdLine
}
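For illustration (not part of the PR, and assuming HTTP_PROXY is one of config.ProxyEnv), propagation adds a single -fw_cfg argument pair whose value is the base64 of the joined KEY="value" list; the demo function and proxy URL below are hypothetical.

// Hypothetical demo, placed in the same package so it can reuse the
// existing fmt/os imports; mirrors the PropagateHostEnv unit test.
func demoPropagateHostEnv() {
	_ = os.Setenv("HTTP_PROXY", "http://proxy.example.com:3128")
	cmdLine := PropagateHostEnv(QemuCmd{})
	// cmdLine now holds two entries:
	//   "-fw_cfg"
	//   "name=opt/com.coreos/environment,string=<base64 of HTTP_PROXY=\"http://proxy.example.com:3128\">"
	fmt.Println(cmdLine)
}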

View File

@ -1,44 +1,14 @@
package qemu
import (
"encoding/json"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/qemu/command"
"github.com/containers/podman/v4/pkg/machine/sockets"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/containers/podman/v4/utils"
"github.com/docker/go-units"
"github.com/sirupsen/logrus"
)
var (
// defaultQMPTimeout is the timeout duration for the
// qmp monitor interactions.
defaultQMPTimeout = 2 * time.Second
)
type QEMUVirtualization struct {
machine.Virtualization
}
// setNewMachineCMDOpts are options needed to pass
// into setting up the qemu command line. long term, this need
// should be eliminated
// TODO Podman5
type setNewMachineCMDOpts struct {
imageDir string
}
type setNewMachineCMDOpts struct{}
// findQEMUBinary locates and returns the QEMU binary
func findQEMUBinary() (string, error) {
@ -48,300 +18,3 @@ func findQEMUBinary() (string, error) {
}
return cfg.FindHelperBinary(QemuCommand, true)
}
// setQMPMonitorSocket sets the virtual machine's QMP Monitor socket
func (v *MachineVM) setQMPMonitorSocket() error {
monitor, err := NewQMPMonitor("unix", v.Name, defaultQMPTimeout)
if err != nil {
return err
}
v.QMPMonitor = monitor
return nil
}
// setNewMachineCMD configure the CLI command that will be run to create the new
// machine
func (v *MachineVM) setNewMachineCMD(qemuBinary string, cmdOpts *setNewMachineCMDOpts) {
v.CmdLine = command.NewQemuBuilder(qemuBinary, v.addArchOptions(cmdOpts))
v.CmdLine.SetMemory(v.Memory)
v.CmdLine.SetCPUs(v.CPUs)
v.CmdLine.SetIgnitionFile(v.IgnitionFile)
v.CmdLine.SetQmpMonitor(v.QMPMonitor)
v.CmdLine.SetNetwork()
v.CmdLine.SetSerialPort(v.ReadySocket, v.VMPidFilePath, v.Name)
v.CmdLine.SetUSBHostPassthrough(v.USBs)
}
// NewMachine initializes an instance of a virtual machine based on the qemu
// virtualization.
func (p *QEMUVirtualization) NewMachine(opts machine.InitOptions) (machine.VM, error) {
vm := new(MachineVM)
if len(opts.Name) > 0 {
vm.Name = opts.Name
}
dataDir, err := machine.GetDataDir(p.VMType())
if err != nil {
return nil, err
}
confDir, err := machine.GetConfDir(vmtype)
if err != nil {
return nil, err
}
// set VM ignition file
if err := ignition.SetIgnitionFile(&vm.IgnitionFile, vmtype, vm.Name, confDir); err != nil {
return nil, err
}
// set VM image file
imagePath, err := define.NewMachineFile(opts.ImagePath, nil)
if err != nil {
return nil, err
}
vm.ImagePath = *imagePath
vm.RemoteUsername = opts.Username
// Add a random port for ssh
port, err := utils.GetRandomPort()
if err != nil {
return nil, err
}
vm.Port = port
vm.CPUs = opts.CPUS
vm.Memory = opts.Memory
vm.DiskSize = opts.DiskSize
if vm.USBs, err = command.ParseUSBs(opts.USBs); err != nil {
return nil, err
}
vm.Created = time.Now()
// find QEMU binary
execPath, err := findQEMUBinary()
if err != nil {
return nil, err
}
if err := vm.setPIDSocket(); err != nil {
return nil, err
}
// Add qmp socket
if err := vm.setQMPMonitorSocket(); err != nil {
return nil, err
}
runtimeDir, err := getRuntimeDir()
if err != nil {
return nil, err
}
symlink := vm.Name + "_ready.sock"
if err := sockets.SetSocket(&vm.ReadySocket, sockets.ReadySocketPath(runtimeDir+"/podman/", vm.Name), &symlink); err != nil {
return nil, err
}
// configure command to run
cmdOpts := setNewMachineCMDOpts{imageDir: dataDir}
vm.setNewMachineCMD(execPath, &cmdOpts)
return vm, nil
}
// LoadVMByName reads a json file that describes a known qemu vm
// and returns a vm instance
func (p *QEMUVirtualization) LoadVMByName(name string) (machine.VM, error) {
vm := &MachineVM{Name: name}
vm.HostUser = vmconfigs.HostUser{UID: -1} // posix reserves -1, so use it to signify undefined
if err := vm.update(); err != nil {
return nil, err
}
lock, err := machine.GetLock(vm.Name, vmtype)
if err != nil {
return nil, err
}
vm.lock = lock
return vm, nil
}
// List lists all vm's that use qemu virtualization
func (p *QEMUVirtualization) List(_ machine.ListOptions) ([]*machine.ListResponse, error) {
return getVMInfos()
}
func getVMInfos() ([]*machine.ListResponse, error) {
vmConfigDir, err := machine.GetConfDir(vmtype)
if err != nil {
return nil, err
}
var listed []*machine.ListResponse
if err = filepath.WalkDir(vmConfigDir, func(path string, d fs.DirEntry, err error) error {
vm := new(MachineVM)
if strings.HasSuffix(d.Name(), ".json") {
fullPath := filepath.Join(vmConfigDir, d.Name())
b, err := os.ReadFile(fullPath)
if err != nil {
return err
}
if err = json.Unmarshal(b, vm); err != nil {
return err
}
listEntry := new(machine.ListResponse)
listEntry.Name = vm.Name
listEntry.Stream = vm.ImageStream
listEntry.VMType = "qemu"
listEntry.CPUs = vm.CPUs
listEntry.Memory = vm.Memory * units.MiB
listEntry.DiskSize = vm.DiskSize * units.GiB
listEntry.Port = vm.Port
listEntry.RemoteUsername = vm.RemoteUsername
listEntry.IdentityPath = vm.IdentityPath
listEntry.CreatedAt = vm.Created
listEntry.Starting = vm.Starting
listEntry.UserModeNetworking = true // always true
if listEntry.CreatedAt.IsZero() {
listEntry.CreatedAt = time.Now()
vm.Created = time.Now()
if err := vm.writeConfig(); err != nil {
return err
}
}
state, err := vm.State(false)
if err != nil {
return err
}
listEntry.Running = state == define.Running
listEntry.LastUp = vm.LastUp
listed = append(listed, listEntry)
}
return nil
}); err != nil {
return nil, err
}
return listed, err
}
func (p *QEMUVirtualization) IsValidVMName(name string) (bool, error) {
infos, err := getVMInfos()
if err != nil {
return false, err
}
for _, vm := range infos {
if vm.Name == name {
return true, nil
}
}
return false, nil
}
// CheckExclusiveActiveVM checks if there is a VM already running
// that does not allow other VMs to be running
func (p *QEMUVirtualization) CheckExclusiveActiveVM() (bool, string, error) {
vms, err := getVMInfos()
if err != nil {
return false, "", fmt.Errorf("checking VM active: %w", err)
}
// NOTE: Start() takes care of dealing with the "starting" state.
for _, vm := range vms {
if vm.Running {
return true, vm.Name, nil
}
}
return false, "", nil
}
// RemoveAndCleanMachines removes all machine and cleans up any other files associated with podman machine
func (p *QEMUVirtualization) RemoveAndCleanMachines() error {
var (
vm machine.VM
listResponse []*machine.ListResponse
opts machine.ListOptions
destroyOptions machine.RemoveOptions
)
destroyOptions.Force = true
var prevErr error
listResponse, err := p.List(opts)
if err != nil {
return err
}
for _, mach := range listResponse {
vm, err = p.LoadVMByName(mach.Name)
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
}
_, remove, err := vm.Remove(mach.Name, destroyOptions)
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
} else {
if err := remove(); err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
}
}
}
// Clean leftover files in data dir
dataDir, err := machine.DataDirPrefix()
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
} else {
err := utils.GuardedRemoveAll(dataDir)
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
}
}
// Clean leftover files in conf dir
confDir, err := machine.ConfDirPrefix()
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
} else {
err := utils.GuardedRemoveAll(confDir)
if err != nil {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
}
}
return prevErr
}
func (p *QEMUVirtualization) VMType() define.VMType {
return vmtype
}
func VirtualizationProvider() machine.VirtProvider {
return &QEMUVirtualization{
machine.NewVirtualization(define.Qemu, compression.Xz, define.Qcow, vmtype),
}
}

File diff suppressed because it is too large

View File

@ -1,20 +0,0 @@
//go:build (amd64 && !windows) || (arm64 && !windows)
package qemu
import (
"testing"
"github.com/containers/podman/v4/pkg/machine/qemu/command"
"github.com/stretchr/testify/require"
)
func TestEditCmd(t *testing.T) {
vm := new(MachineVM)
vm.CmdLine = command.QemuCmd{"command", "-flag", "value"}
vm.editCmdLine("-flag", "newvalue")
vm.editCmdLine("-anotherflag", "anothervalue")
require.Equal(t, vm.CmdLine.Build(), []string{"command", "-flag", "newvalue", "-anotherflag", "anothervalue"})
}

View File

@ -1,11 +1,10 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
//go:build dragonfly || freebsd || linux || netbsd || openbsd
package qemu
import (
"bytes"
"fmt"
"strings"
"syscall"
"golang.org/x/sys/unix"
@ -32,17 +31,6 @@ func checkProcessStatus(processHint string, pid int, stderrBuf *bytes.Buffer) er
return nil
}
func pathsFromVolume(volume string) []string {
return strings.SplitN(volume, ":", 3)
}
func extractTargetPath(paths []string) string {
if len(paths) > 1 {
return paths[1]
}
return paths[0]
}
func sigKill(pid int) error {
return unix.Kill(pid, unix.SIGKILL)
}

View File

@ -1,18 +0,0 @@
package qemu
var (
QemuCommand = "qemu-system-x86_64"
)
func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
opts := []string{"-machine", "q35,accel=hvf:tcg", "-cpu", "host"}
return opts
}
func (v *MachineVM) prepare() error {
return nil
}
func (v *MachineVM) archRemovalFiles() []string {
return []string{}
}

View File

@ -1,78 +0,0 @@
package qemu
import (
"os"
"os/exec"
"path/filepath"
"github.com/containers/common/pkg/config"
)
var (
QemuCommand = "qemu-system-aarch64"
)
func (v *MachineVM) addArchOptions(cmdOpts *setNewMachineCMDOpts) []string {
ovmfDir := getOvmfDir(cmdOpts.imageDir, v.Name)
opts := []string{
"-accel", "hvf",
"-accel", "tcg",
"-cpu", "host",
"-M", "virt,highmem=on",
"-drive", "file=" + getEdk2CodeFd("edk2-aarch64-code.fd") + ",if=pflash,format=raw,readonly=on",
"-drive", "file=" + ovmfDir + ",if=pflash,format=raw"}
return opts
}
func (v *MachineVM) prepare() error {
ovmfDir := getOvmfDir(filepath.Dir(v.ImagePath.GetPath()), v.Name)
cmd := []string{"/bin/dd", "if=/dev/zero", "conv=sync", "bs=1m", "count=64", "of=" + ovmfDir}
return exec.Command(cmd[0], cmd[1:]...).Run()
}
func (v *MachineVM) archRemovalFiles() []string {
ovmDir := getOvmfDir(filepath.Dir(v.ImagePath.GetPath()), v.Name)
return []string{ovmDir}
}
func getOvmfDir(imagePath, vmName string) string {
return filepath.Join(imagePath, vmName+"_ovmf_vars.fd")
}
/*
* When QEmu is installed in a non-default location in the system
* we can use the qemu-system-* binary path to figure the install
* location for Qemu and use it to look for edk2-code-fd
*/
func getEdk2CodeFdPathFromQemuBinaryPath() string {
cfg, err := config.Default()
if err == nil {
execPath, err := cfg.FindHelperBinary(QemuCommand, true)
if err == nil {
return filepath.Clean(filepath.Join(filepath.Dir(execPath), "..", "share", "qemu"))
}
}
return ""
}
/*
* QEmu can be installed in multiple locations on MacOS, especially on
* Apple Silicon systems. A build from source will likely install it in
* /usr/local/bin, whereas Homebrew package management standard is to
* install in /opt/homebrew
*/
func getEdk2CodeFd(name string) string {
dirs := []string{
getEdk2CodeFdPathFromQemuBinaryPath(),
"/opt/homebrew/opt/podman/libexec/share/qemu",
"/usr/local/share/qemu",
"/opt/homebrew/share/qemu",
}
for _, dir := range dirs {
fullpath := filepath.Join(dir, name)
if _, err := os.Stat(fullpath); err == nil {
return fullpath
}
}
return name
}

View File

@ -4,18 +4,10 @@ var (
QemuCommand = "qemu-system-x86_64"
)
func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string {
opts := []string{
"-accel", "kvm",
"-cpu", "host",
}
return opts
}
func (v *MachineVM) prepare() error {
return nil
}
func (v *MachineVM) archRemovalFiles() []string {
return []string{}
}

View File

@ -9,7 +9,7 @@ var (
QemuCommand = "qemu-system-aarch64"
)
func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string {
opts := []string{
"-accel", "kvm",
"-cpu", "host",
@ -19,14 +19,6 @@ func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
return opts
}
func (v *MachineVM) prepare() error {
return nil
}
func (v *MachineVM) archRemovalFiles() []string {
return []string{}
}
func getQemuUefiFile(name string) string {
dirs := []string{
"/usr/share/qemu-efi-aarch64",

View File

@ -4,7 +4,7 @@ var (
QemuCommand = "qemu-system-x86_64w"
)
func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string {
// "qemu64" level is used, because "host" is not supported with "whpx" acceleration.
// It is a stable choice for running on bare metal and inside Hyper-V machine with nested virtualization.
opts := []string{"-machine", "q35,accel=whpx:tcg", "-cpu", "qemu64"}

View File

@ -4,7 +4,7 @@ var (
QemuCommand = "qemu-system-aarch64w"
)
func (v *MachineVM) addArchOptions(_ *setNewMachineCMDOpts) []string {
func (q *QEMUStubber) addArchOptions(_ *setNewMachineCMDOpts) []string {
// stub to fix compilation issues
opts := []string{}
return opts

321
pkg/machine/qemu/stubber.go Normal file
View File

@ -0,0 +1,321 @@
package qemu
import (
"bufio"
"bytes"
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/strongunits"
gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/qemu/command"
"github.com/containers/podman/v4/pkg/machine/sockets"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
type QEMUStubber struct {
vmconfigs.QEMUConfig
// Command describes the final QEMU command line
Command command.QemuCmd
}
func (q *QEMUStubber) setQEMUCommandLine(mc *vmconfigs.MachineConfig) error {
qemuBinary, err := findQEMUBinary()
if err != nil {
return err
}
ignitionFile, err := mc.IgnitionFile()
if err != nil {
return err
}
readySocket, err := mc.ReadySocket()
if err != nil {
return err
}
q.QEMUPidPath = mc.QEMUHypervisor.QEMUPidPath
q.Command = command.NewQemuBuilder(qemuBinary, q.addArchOptions(nil))
q.Command.SetBootableImage(mc.ImagePath.GetPath())
q.Command.SetMemory(mc.Resources.Memory)
q.Command.SetCPUs(mc.Resources.CPUs)
q.Command.SetIgnitionFile(*ignitionFile)
q.Command.SetQmpMonitor(mc.QEMUHypervisor.QMPMonitor)
q.Command.SetNetwork()
q.Command.SetSerialPort(*readySocket, *mc.QEMUHypervisor.QEMUPidPath, mc.Name)
// Add volumes to qemu command line
for _, mount := range mc.Mounts {
// the index provided in this case is thrown away
_, _, _, _, securityModel := vmconfigs.SplitVolume(0, mount.OriginalInput)
q.Command.SetVirtfsMount(mount.Source, mount.Tag, securityModel, mount.ReadOnly)
}
// TODO
// v.QEMUConfig.Command.SetUSBHostPassthrough(v.USBs)
return nil
}
func (q *QEMUStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConfig, _ *ignition.IgnitionBuilder) error {
monitor, err := command.NewQMPMonitor(opts.Name, opts.Dirs.RuntimeDir)
if err != nil {
return err
}
qemuConfig := vmconfigs.QEMUConfig{
QMPMonitor: monitor,
}
machineRuntimeDir, err := mc.RuntimeDir()
if err != nil {
return err
}
qemuPidPath, err := machineRuntimeDir.AppendToNewVMFile(mc.Name+"_vm.pid", nil)
if err != nil {
return err
}
mc.QEMUHypervisor = &qemuConfig
mc.QEMUHypervisor.QEMUPidPath = qemuPidPath
return q.resizeDisk(strongunits.GiB(mc.Resources.DiskSize), mc.ImagePath)
}
func runStartVMCommand(cmd *exec.Cmd) error {
err := cmd.Start()
if err != nil {
// check if qemu was not found
// look up qemu again maybe the path was changed, https://github.com/containers/podman/issues/13394
cfg, err := config.Default()
if err != nil {
return err
}
qemuBinaryPath, err := cfg.FindHelperBinary(QemuCommand, true)
if err != nil {
return err
}
cmd.Path = qemuBinaryPath
err = cmd.Start()
if err != nil {
return fmt.Errorf("unable to execute %q: %w", cmd, err)
}
}
return nil
}
func (q *QEMUStubber) StartVM(mc *vmconfigs.MachineConfig) (func() error, func() error, error) {
if err := q.setQEMUCommandLine(mc); err != nil {
return nil, nil, fmt.Errorf("unable to generate qemu command line: %q", err)
}
defaultBackoff := 500 * time.Millisecond
maxBackoffs := 6
readySocket, err := mc.ReadySocket()
if err != nil {
return nil, nil, err
}
// If the qemusocketpath exists and the vm is off/down, we should rm
// it before the dial so as to avoid a segv
if err := mc.QEMUHypervisor.QMPMonitor.Address.Delete(); err != nil {
return nil, nil, err
}
qemuSocketConn, err := sockets.DialSocketWithBackoffs(maxBackoffs, defaultBackoff, mc.QEMUHypervisor.QMPMonitor.Address.GetPath())
if err != nil {
return nil, nil, fmt.Errorf("failed to connect to qemu monitor socket: %w", err)
}
defer qemuSocketConn.Close()
fd, err := qemuSocketConn.(*net.UnixConn).File()
if err != nil {
return nil, nil, err
}
defer fd.Close()
dnr, dnw, err := machine.GetDevNullFiles()
if err != nil {
return nil, nil, err
}
defer dnr.Close()
defer dnw.Close()
attr := new(os.ProcAttr)
files := []*os.File{dnr, dnw, dnw, fd}
attr.Files = files
cmdLine := q.Command
cmdLine.SetPropagatedHostEnvs()
// Disable graphic window when not in debug mode
// Done in start, so we're not stuck with the debug level we used on init
if !logrus.IsLevelEnabled(logrus.DebugLevel) {
cmdLine.SetDisplay("none")
}
logrus.Debugf("qemu cmd: %v", cmdLine)
stderrBuf := &bytes.Buffer{}
// actually run the command that starts the virtual machine
cmd := &exec.Cmd{
Args: cmdLine,
Path: cmdLine[0],
Stdin: dnr,
Stdout: dnw,
Stderr: stderrBuf,
ExtraFiles: []*os.File{fd},
}
if err := runStartVMCommand(cmd); err != nil {
return nil, nil, err
}
logrus.Debugf("Started qemu pid %d", cmd.Process.Pid)
readyFunc := func() error {
return waitForReady(readySocket, cmd.Process.Pid, stderrBuf)
}
// if this is not the last line in the func, make it a defer
return cmd.Process.Release, readyFunc, nil
}
func waitForReady(readySocket *define.VMFile, pid int, stdErrBuffer *bytes.Buffer) error {
defaultBackoff := 500 * time.Millisecond
maxBackoffs := 6
conn, err := sockets.DialSocketWithBackoffsAndProcCheck(maxBackoffs, defaultBackoff, readySocket.GetPath(), checkProcessStatus, "qemu", pid, stdErrBuffer)
if err != nil {
return err
}
defer conn.Close()
_, err = bufio.NewReader(conn).ReadString('\n')
return err
}
func (q *QEMUStubber) GetHyperVisorVMs() ([]string, error) {
return nil, nil
}
func (q *QEMUStubber) VMType() define.VMType {
return define.QemuVirt
}
func (q *QEMUStubber) PrepareIgnition(_ *vmconfigs.MachineConfig, _ *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error) {
return nil, nil
}
func (q *QEMUStubber) StopHostNetworking(_ *vmconfigs.MachineConfig, _ define.VMType) error {
return define.ErrNotImplemented
}
func (q *QEMUStubber) resizeDisk(newSize strongunits.GiB, diskPath *define.VMFile) error {
// Find the qemu executable
cfg, err := config.Default()
if err != nil {
return err
}
resizePath, err := cfg.FindHelperBinary("qemu-img", true)
if err != nil {
return err
}
resize := exec.Command(resizePath, []string{"resize", diskPath.GetPath(), strconv.Itoa(int(newSize)) + "G"}...)
resize.Stdout = os.Stdout
resize.Stderr = os.Stderr
if err := resize.Run(); err != nil {
return fmt.Errorf("resizing image: %q", err)
}
return nil
}
func (q *QEMUStubber) SetProviderAttrs(mc *vmconfigs.MachineConfig, cpus, memory *uint64, newDiskSize *strongunits.GiB, newRootful *bool) error {
if newDiskSize != nil {
if err := q.resizeDisk(*newDiskSize, mc.ImagePath); err != nil {
return err
}
}
if newRootful != nil && mc.HostUser.Rootful != *newRootful {
if err := mc.SetRootful(*newRootful); err != nil {
return err
}
}
// Because QEMU does nothing with these hardware attributes, we can simply return
return nil
}
func (q *QEMUStubber) StartNetworking(mc *vmconfigs.MachineConfig, cmd *gvproxy.GvproxyCommand) error {
cmd.AddQemuSocket(fmt.Sprintf("unix://%s", mc.QEMUHypervisor.QMPMonitor.Address.GetPath()))
return nil
}
func (q *QEMUStubber) RemoveAndCleanMachines(_ *define.MachineDirs) error {
// nothing to do but remove files
return nil
}
// mountVolumesToVM iterates through the machine's volumes and mounts them to the
// machine
// TODO this should probably be temporary; mount code should probably be its own package and shared completely
func (q *QEMUStubber) MountVolumesToVM(mc *vmconfigs.MachineConfig, quiet bool) error {
for _, mount := range mc.Mounts {
if !quiet {
fmt.Printf("Mounting volume... %s:%s\n", mount.Source, mount.Target)
}
// create mountpoint directory if it doesn't exist
// because / is immutable, we have to monkey around with permissions
// if we don't mount in /home or /mnt
args := []string{"-q", "--"}
if !strings.HasPrefix(mount.Target, "/home") && !strings.HasPrefix(mount.Target, "/mnt") {
args = append(args, "sudo", "chattr", "-i", "/", ";")
}
args = append(args, "sudo", "mkdir", "-p", mount.Target)
if !strings.HasPrefix(mount.Target, "/home") && !strings.HasPrefix(mount.Target, "/mnt") {
args = append(args, ";", "sudo", "chattr", "+i", "/", ";")
}
err := machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args)
if err != nil {
return err
}
switch mount.Type {
case MountType9p:
mountOptions := []string{"-t", "9p"}
mountOptions = append(mountOptions, []string{"-o", "trans=virtio", mount.Tag, mount.Target}...)
mountOptions = append(mountOptions, []string{"-o", "version=9p2000.L,msize=131072,cache=mmap"}...)
if mount.ReadOnly {
mountOptions = append(mountOptions, []string{"-o", "ro"}...)
}
err = machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, append([]string{"-q", "--", "sudo", "mount"}, mountOptions...))
if err != nil {
return err
}
default:
return fmt.Errorf("unknown mount type: %s", mount.Type)
}
}
return nil
}
func (q *QEMUStubber) MountType() vmconfigs.VolumeMountType {
return vmconfigs.NineP
}
func (q *QEMUStubber) PostStartNetworking(mc *vmconfigs.MachineConfig) error {
return nil
}
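A hedged consumer sketch (not in the PR) of the StartVM contract above: the stubber hands back a process-release function and a readiness function, and callers are expected to wait on readiness before releasing; shim.Start is the real consumer.

// Hypothetical illustration in the same package.
func startAndWait(q *QEMUStubber, mc *vmconfigs.MachineConfig) error {
	release, waitForReady, err := q.StartVM(mc)
	if err != nil {
		return err
	}
	if err := waitForReady(); err != nil { // blocks until the guest trips the ready socket
		return err
	}
	return release() // let go of the qemu process once the VM reports ready
}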

View File

@ -0,0 +1 @@
package machine

View File

@ -1,4 +1,4 @@
package qemu
package shim
import (
"fmt"

View File

@ -1,6 +1,6 @@
//go:build !darwin
package qemu
package shim
func dockerClaimHelperInstalled() bool {
return false

447
pkg/machine/shim/host.go Normal file
View File

@ -0,0 +1,447 @@
package shim
import (
"context"
"errors"
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/connection"
machineDefine "github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/ocipull"
"github.com/containers/podman/v4/pkg/machine/stdpull"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
/*
Host
├ Info
├ OS Apply
├ SSH
├ List
├ Init
├ VMExists
├ CheckExclusiveActiveVM *HyperV/WSL need to check their hypervisors as well
*/
func Info() {}
func OSApply() {}
func SSH() {}
// List is done at the host level to allow for a *possible* future where
// more than one provider is used
func List(vmstubbers []vmconfigs.VMProvider, opts machine.ListOptions) ([]*machine.ListResponse, error) {
var (
lrs []*machine.ListResponse
)
for _, s := range vmstubbers {
dirs, err := machine.GetMachineDirs(s.VMType())
if err != nil {
return nil, err
}
mcs, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
return nil, err
}
for name, mc := range mcs {
state, err := s.State(mc, false)
if err != nil {
return nil, err
}
lr := machine.ListResponse{
Name: name,
CreatedAt: mc.Created,
LastUp: mc.LastUp,
Running: state == machineDefine.Running,
Starting: mc.Starting,
//Stream: "", // No longer applicable
VMType: s.VMType().String(),
CPUs: mc.Resources.CPUs,
Memory: mc.Resources.Memory,
DiskSize: mc.Resources.DiskSize,
Port: mc.SSH.Port,
RemoteUsername: mc.SSH.RemoteUsername,
IdentityPath: mc.SSH.IdentityPath,
UserModeNetworking: false, // TODO Need to plumb this for WSL
}
lrs = append(lrs, &lr)
}
}
return lrs, nil
}
func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) (*vmconfigs.MachineConfig, error) {
var (
err error
imageExtension string
imagePath *machineDefine.VMFile
)
callbackFuncs := machine.InitCleanup()
defer callbackFuncs.CleanIfErr(&err)
go callbackFuncs.CleanOnSignal()
dirs, err := machine.GetMachineDirs(mp.VMType())
if err != nil {
return nil, err
}
sshIdentityPath, err := machine.GetSSHIdentityPath(machineDefine.DefaultIdentityName)
if err != nil {
return nil, err
}
sshKey, err := machine.GetSSHKeys(sshIdentityPath)
if err != nil {
return nil, err
}
mc, err := vmconfigs.NewMachineConfig(opts, dirs, sshIdentityPath)
if err != nil {
return nil, err
}
createOpts := machineDefine.CreateVMOpts{
Name: opts.Name,
Dirs: dirs,
}
// Get Image
// TODO This needs rework bigtime; my preference is that most of the below not live in here.
// ideally we could get a func back that pulls the image, and only do so IF everything works because
// image stuff is the slowest part of the operation
// This is a break from before. New images are named vmname-ARCH.
// It turns out that Windows/HyperV will not accept a disk that
// is not suffixed as ".vhdx". Go figure
switch mp.VMType() {
case machineDefine.QemuVirt:
imageExtension = ".qcow2"
case machineDefine.AppleHvVirt:
imageExtension = ".raw"
case machineDefine.HyperVVirt:
imageExtension = ".vhdx"
default:
// do nothing
}
imagePath, err = dirs.DataDir.AppendToNewVMFile(fmt.Sprintf("%s-%s%s", opts.Name, runtime.GOARCH, imageExtension), nil)
if err != nil {
return nil, err
}
var mydisk ocipull.Disker
// TODO The following stanzas should be re-written in a different place. It should have a custom
// parser for our image pulling. It would be nice if init just got an error and mydisk back.
//
// Eventual valid input:
// "" <- means take the default
// "http|https://path"
// "/path
// "docker://quay.io/something/someManifest
if opts.ImagePath == "" {
mydisk, err = ocipull.NewVersioned(context.Background(), dirs.DataDir, opts.Name, mp.VMType().String(), imagePath)
} else {
if strings.HasPrefix(opts.ImagePath, "http") {
// TODO probably should use tempdir instead of datadir
mydisk, err = stdpull.NewDiskFromURL(opts.ImagePath, imagePath, dirs.DataDir)
} else {
mydisk, err = stdpull.NewStdDiskPull(opts.ImagePath, imagePath)
}
}
if err != nil {
return nil, err
}
err = mydisk.Get()
if err != nil {
return nil, err
}
mc.ImagePath = imagePath
callbackFuncs.Add(mc.ImagePath.Delete)
logrus.Debugf("--> imagePath is %q", imagePath.GetPath())
ignitionFile, err := mc.IgnitionFile()
if err != nil {
return nil, err
}
uid := os.Getuid()
if uid == -1 { // windows compensation
uid = 1000
}
ignBuilder := ignition.NewIgnitionBuilder(ignition.DynamicIgnition{
Name: opts.Username,
Key: sshKey,
TimeZone: opts.TimeZone,
UID: uid,
VMName: opts.Name,
VMType: mp.VMType(),
WritePath: ignitionFile.GetPath(),
Rootful: opts.Rootful,
})
// If the user provides an ignition file, we need to
// copy it into the conf dir
if len(opts.IgnitionPath) > 0 {
err = ignBuilder.BuildWithIgnitionFile(opts.IgnitionPath)
return nil, err
}
err = ignBuilder.GenerateIgnitionConfig()
if err != nil {
return nil, err
}
readyIgnOpts, err := mp.PrepareIgnition(mc, &ignBuilder)
if err != nil {
return nil, err
}
readyUnitFile, err := ignition.CreateReadyUnitFile(mp.VMType(), readyIgnOpts)
if err != nil {
return nil, err
}
readyUnit := ignition.Unit{
Enabled: ignition.BoolToPtr(true),
Name: "ready.service",
Contents: ignition.StrToPtr(readyUnitFile),
}
ignBuilder.WithUnit(readyUnit)
// Mounts
mc.Mounts = CmdLineVolumesToMounts(opts.Volumes, mp.MountType())
// TODO AddSSHConnectionsToPodmanSocket could take a machineconfig instead
if err := connection.AddSSHConnectionsToPodmanSocket(mc.HostUser.UID, mc.SSH.Port, mc.SSH.IdentityPath, mc.Name, mc.SSH.RemoteUsername, opts); err != nil {
return nil, err
}
cleanup := func() error {
return connection.RemoveConnections(mc.Name, mc.Name+"-root")
}
callbackFuncs.Add(cleanup)
err = mp.CreateVM(createOpts, mc, &ignBuilder)
if err != nil {
return nil, err
}
err = ignBuilder.Build()
if err != nil {
return nil, err
}
return mc, err
}
// VMExists looks across the given providers for a machine's existence. It returns the actual config and a found bool
func VMExists(name string, vmstubbers []vmconfigs.VMProvider) (*vmconfigs.MachineConfig, bool, error) {
// Look on disk first
mcs, err := getMCsOverProviders(vmstubbers)
if err != nil {
return nil, false, err
}
if mc, found := mcs[name]; found {
return mc, true, nil
}
// Check with the provider hypervisor
for _, vmstubber := range vmstubbers {
vms, err := vmstubber.GetHyperVisorVMs()
if err != nil {
return nil, false, err
}
if util.StringInSlice(name, vms) { //nolint:staticcheck
return nil, true, fmt.Errorf("vm %q already exists on hypervisor", name)
}
}
return nil, false, nil
}
// CheckExclusiveActiveVM checks if any of the machines are already running
func CheckExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.MachineConfig) error {
// Check if any other machines are running; if so, we error
localMachines, err := getMCsOverProviders([]vmconfigs.VMProvider{provider})
if err != nil {
return err
}
for name, localMachine := range localMachines {
state, err := provider.State(localMachine, false)
if err != nil {
return err
}
if state == machineDefine.Running {
return fmt.Errorf("unable to start %q: machine %s already running", mc.Name, name)
}
}
return nil
}
// getMCsOverProviders loads machineconfigs from a config dir derived from the "provider". It returns only what is known on
// disk, so things like status may be incomplete or inaccurate
func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]*vmconfigs.MachineConfig, error) {
mcs := make(map[string]*vmconfigs.MachineConfig)
for _, stubber := range vmstubbers {
dirs, err := machine.GetMachineDirs(stubber.VMType())
if err != nil {
return nil, err
}
stubberMCs, err := vmconfigs.LoadMachinesInDir(dirs)
if err != nil {
return nil, err
}
// TODO When we get to golang-1.20+ we can replace the following with maps.Copy
// maps.Copy(mcs, stubberMCs)
// iterate known mcs and add the stubbers
for mcName, mc := range stubberMCs {
if _, ok := mcs[mcName]; !ok {
mcs[mcName] = mc
}
}
}
return mcs, nil
}
// Stop stops the machine as well as supporting binaries/processes
// TODO: I think this probably needs to go somewhere that remove can call it.
func Stop(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, hardStop bool) error {
// state is checked here instead of earlier because stopping a stopped vm is not considered
// an error, so it is handled in one place instead of sprinkled all over
state, err := mp.State(mc, false)
if err != nil {
return err
}
// stopping a stopped machine is NOT an error
if state == machineDefine.Stopped {
return nil
}
if state != machineDefine.Running {
return machineDefine.ErrWrongState
}
// Provider stops the machine
if err := mp.StopVM(mc, hardStop); err != nil {
return err
}
// Remove Ready Socket
readySocket, err := mc.ReadySocket()
if err != nil {
return err
}
if err := readySocket.Delete(); err != nil {
return err
}
// Stop GvProxy and remove PID file
gvproxyPidFile, err := dirs.RuntimeDir.AppendToNewVMFile("gvproxy.pid", nil)
if err != nil {
return err
}
defer func() {
if err := machine.CleanupGVProxy(*gvproxyPidFile); err != nil {
logrus.Errorf("unable to clean up gvproxy: %q", err)
}
}()
return nil
}
func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDefine.MachineDirs, opts machine.StartOptions) error {
defaultBackoff := 500 * time.Millisecond
maxBackoffs := 6
// start gvproxy and set up the API socket forwarding
forwardSocketPath, forwardingState, err := startNetworking(mc, mp)
if err != nil {
return err
}
// if there are generic things that need to be done, a preStart function could be added here
// should it be extensive
// releaseFunc is if the provider starts a vm using a go command
// and we still need control of it while it is booting until the ready
// socket is tripped
releaseCmd, WaitForReady, err := mp.StartVM(mc)
if err != nil {
return err
}
if WaitForReady == nil {
return errors.New("no valid wait function returned")
}
if err := WaitForReady(); err != nil {
return err
}
if releaseCmd != nil && releaseCmd() != nil { // some providers can return nil here (hyperv)
if err := releaseCmd(); err != nil {
// I think it is ok for a "light" error?
logrus.Error(err)
}
}
err = mp.PostStartNetworking(mc)
if err != nil {
return err
}
stateF := func() (machineDefine.Status, error) {
return mp.State(mc, true)
}
connected, sshError, err := conductVMReadinessCheck(mc, maxBackoffs, defaultBackoff, stateF)
if err != nil {
return err
}
if !connected {
msg := "machine did not transition into running state"
if sshError != nil {
return fmt.Errorf("%s: ssh error: %v", msg, sshError)
}
return errors.New(msg)
}
// mount the volumes to the VM
if err := mp.MountVolumesToVM(mc, opts.Quiet); err != nil {
return err
}
machine.WaitAPIAndPrintInfo(
forwardingState,
mc.Name,
findClaimHelper(),
forwardSocketPath,
opts.NoInfo,
mc.HostUser.Rootful,
)
// update the podman/docker socket service if the host user has been modified at all (UID or Rootful)
if mc.HostUser.Modified {
if machine.UpdatePodmanDockerSockService(mc) == nil {
// Reset modification state if there are no errors, otherwise ignore errors
// which are already logged
mc.HostUser.Modified = false
if err := mc.Write(); err != nil {
logrus.Error(err)
}
}
}
return nil
}
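Tying the refactor together, a hedged caller-side sketch (not part of the PR; the machine name and import paths are assumed from the diff) of how a front end might drive provider.Get, shim.Init, and shim.Start:

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine"
	machineDefine "github.com/containers/podman/v4/pkg/machine/define"
	"github.com/containers/podman/v4/pkg/machine/provider"
	"github.com/containers/podman/v4/pkg/machine/shim"
)

// Hypothetical front-end flow, not part of the PR.
func main() {
	mp, err := provider.Get()
	if err != nil {
		log.Fatal(err)
	}
	mc, err := shim.Init(machineDefine.InitOptions{Name: "podman-machine-default"}, mp)
	if err != nil {
		log.Fatal(err)
	}
	dirs, err := machine.GetMachineDirs(mp.VMType())
	if err != nil {
		log.Fatal(err)
	}
	if err := shim.Start(mc, mp, dirs, machine.StartOptions{}); err != nil {
		log.Fatal(err)
	}
}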

View File

@ -0,0 +1,215 @@
package shim
import (
"fmt"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/common/pkg/config"
gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
const (
dockerSock = "/var/run/docker.sock"
defaultGuestSock = "/run/user/%d/podman/podman.sock"
dockerConnectTimeout = 5 * time.Second
)
func startNetworking(mc *vmconfigs.MachineConfig, provider vmconfigs.VMProvider) (string, machine.APIForwardingState, error) {
var (
forwardingState machine.APIForwardingState
forwardSock string
)
// the guestSock is "inside" the guest machine
guestSock := fmt.Sprintf(defaultGuestSock, mc.HostUser.UID)
forwardUser := mc.SSH.RemoteUsername
// TODO should this go up the stack higher
if mc.HostUser.Rootful {
guestSock = "/run/podman/podman.sock"
forwardUser = "root"
}
cfg, err := config.Default()
if err != nil {
return "", 0, err
}
binary, err := cfg.FindHelperBinary(machine.ForwarderBinaryName, false)
if err != nil {
return "", 0, err
}
dataDir, err := mc.DataDir()
if err != nil {
return "", 0, err
}
hostSocket, err := dataDir.AppendToNewVMFile("podman.sock", nil)
if err != nil {
return "", 0, err
}
runDir, err := mc.RuntimeDir()
if err != nil {
return "", 0, err
}
linkSocketPath := filepath.Dir(dataDir.GetPath())
linkSocket, err := define.NewMachineFile(filepath.Join(linkSocketPath, "podman.sock"), nil)
if err != nil {
return "", 0, err
}
cmd := gvproxy.NewGvproxyCommand()
// GvProxy PID file path is now derived
cmd.PidFile = filepath.Join(runDir.GetPath(), "gvproxy.pid")
	// TODO This can be re-enabled when gvisor-tap-vsock #305 is merged
	// if debug is set, we dump to a logfile as well
// if logrus.IsLevelEnabled(logrus.DebugLevel) {
// cmd.LogFile = filepath.Join(runDir.GetPath(), "gvproxy.log")
// }
cmd.SSHPort = mc.SSH.Port
cmd.AddForwardSock(hostSocket.GetPath())
cmd.AddForwardDest(guestSock)
cmd.AddForwardUser(forwardUser)
cmd.AddForwardIdentity(mc.SSH.IdentityPath)
if logrus.IsLevelEnabled(logrus.DebugLevel) {
cmd.Debug = true
logrus.Debug(cmd)
}
// This allows a provider to perform additional setup as well as
// add in any provider specific options for gvproxy
if err := provider.StartNetworking(mc, &cmd); err != nil {
return "", 0, err
}
if mc.HostUser.UID != -1 {
forwardSock, forwardingState = setupAPIForwarding(hostSocket, linkSocket)
}
c := cmd.Cmd(binary)
logrus.Debugf("gvproxy command-line: %s %s", binary, strings.Join(cmd.ToCmdline(), " "))
if err := c.Start(); err != nil {
return forwardSock, 0, fmt.Errorf("unable to execute: %q: %w", cmd.ToCmdline(), err)
}
return forwardSock, forwardingState, nil
}
type apiOptions struct { //nolint:unused
socketpath, destinationSocketPath *define.VMFile
	forwardUser string
}
func setupAPIForwarding(hostSocket, linkSocket *define.VMFile) (string, machine.APIForwardingState) {
// The linking pattern is /var/run/docker.sock -> user global sock (link) -> machine sock (socket)
// This allows the helper to only have to maintain one constant target to the user, which can be
// repositioned without updating docker.sock.
if !dockerClaimSupported() {
return hostSocket.GetPath(), machine.ClaimUnsupported
}
if !dockerClaimHelperInstalled() {
return hostSocket.GetPath(), machine.NotInstalled
}
if !alreadyLinked(hostSocket.GetPath(), linkSocket.GetPath()) {
if checkSockInUse(linkSocket.GetPath()) {
return hostSocket.GetPath(), machine.MachineLocal
}
_ = linkSocket.Delete()
if err := os.Symlink(hostSocket.GetPath(), linkSocket.GetPath()); err != nil {
logrus.Warnf("could not create user global API forwarding link: %s", err.Error())
return hostSocket.GetPath(), machine.MachineLocal
}
}
if !alreadyLinked(linkSocket.GetPath(), dockerSock) {
if checkSockInUse(dockerSock) {
return hostSocket.GetPath(), machine.MachineLocal
}
if !claimDockerSock() {
logrus.Warn("podman helper is installed, but was not able to claim the global docker sock")
return hostSocket.GetPath(), machine.MachineLocal
}
}
return dockerSock, machine.DockerGlobal
}
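// To make the linking pattern above concrete (hypothetical rootless layout): the chain
// ends up roughly as
//
//	/var/run/docker.sock -> <user machine dir>/podman.sock -> <provider data dir>/podman.sock
//
// so Docker clients keep pointing at one stable path even when the machine socket is
// recreated or repositioned.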
func alreadyLinked(target string, link string) bool {
read, err := os.Readlink(link)
return err == nil && read == target
}
func checkSockInUse(sock string) bool {
if info, err := os.Stat(sock); err == nil && info.Mode()&fs.ModeSocket == fs.ModeSocket {
		_, err = net.DialTimeout("unix", sock, dockerConnectTimeout)
return err == nil
}
return false
}
// conductVMReadinessCheck checks to make sure the machine is in the proper state
// and that SSH is up and running
func conductVMReadinessCheck(mc *vmconfigs.MachineConfig, maxBackoffs int, backoff time.Duration, stateF func() (define.Status, error)) (connected bool, sshError error, err error) {
for i := 0; i < maxBackoffs; i++ {
if i > 0 {
time.Sleep(backoff)
backoff *= 2
}
state, err := stateF()
if err != nil {
return false, nil, err
}
if state == define.Running && isListening(mc.SSH.Port) {
// Also make sure that SSH is up and running. The
// ready service's dependencies don't fully make sure
// that clients can SSH into the machine immediately
// after boot.
//
// CoreOS users have reported the same observation but
// the underlying source of the issue remains unknown.
if sshError = machine.CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, []string{"true"}); sshError != nil {
logrus.Debugf("SSH readiness check for machine failed: %v", sshError)
continue
}
connected = true
break
}
}
return
}
func isListening(port int) bool {
// Check if we can dial it
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", "127.0.0.1", port), 10*time.Millisecond)
if err != nil {
return false
}
if err := conn.Close(); err != nil {
logrus.Error(err)
}
return true
}
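
As a rough budget check (arithmetic only, not code from this PR): with the defaults Start passes to conductVMReadinessCheck (500 ms initial backoff, 6 attempts), the loop sleeps before attempts 2 through 6 and doubles each time.

package main

import (
	"fmt"
	"time"
)

// backoffBudget sums the sleeps performed by conductVMReadinessCheck with Start's
// defaults: 0.5s + 1s + 2s + 4s + 8s.
func backoffBudget() time.Duration {
	backoff := 500 * time.Millisecond
	total := time.Duration(0)
	for i := 1; i < 6; i++ {
		total += backoff
		backoff *= 2
	}
	return total
}

func main() {
	fmt.Println(backoffBudget()) // 15.5s, excluding time spent in the state and SSH probes
}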

View File

@ -0,0 +1,30 @@
package shim
import (
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)
func CmdLineVolumesToMounts(volumes []string, volumeType vmconfigs.VolumeMountType) []*vmconfigs.Mount {
mounts := []*vmconfigs.Mount{}
for i, volume := range volumes {
var mount vmconfigs.Mount
tag, source, target, readOnly, _ := vmconfigs.SplitVolume(i, volume)
switch volumeType {
case vmconfigs.VirtIOFS:
virtioMount := machine.NewVirtIoFsMount(source, target, readOnly)
mount = virtioMount.ToMount()
default:
mount = vmconfigs.Mount{
Type: volumeType.String(),
Tag: tag,
Source: source,
Target: target,
ReadOnly: readOnly,
OriginalInput: volume,
}
}
mounts = append(mounts, &mount)
}
return mounts
}
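
A hypothetical usage sketch on a Unix host (the paths are made up, and the shim import path is assumed from this PR's layout): converting two --volume style strings into mounts for a virtiofs-capable provider.

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/machine/shim"
	"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)

func main() {
	// Hypothetical volume strings in source:target[:options] form.
	volumes := []string{"/Users/core/src:/mnt/src", "/tmp/cache:/mnt/cache:ro"}
	mounts := shim.CmdLineVolumesToMounts(volumes, vmconfigs.VirtIOFS)
	for _, m := range mounts {
		fmt.Printf("%s -> %s (ro=%v)\n", m.Source, m.Target, m.ReadOnly)
	}
}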

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/sirupsen/logrus"
)
// SetSocket creates a new machine file for the socket and assigns it to
@ -33,10 +34,12 @@ func ReadySocketPath(runtimeDir, machineName string) string {
func ListenAndWaitOnSocket(errChan chan<- error, listener net.Listener) {
conn, err := listener.Accept()
if err != nil {
logrus.Debug("failed to connect to ready socket")
errChan <- err
return
}
_, err = bufio.NewReader(conn).ReadString('\n')
logrus.Debug("ready ack received")
if closeErr := conn.Close(); closeErr != nil {
errChan <- closeErr

View File

@ -11,6 +11,7 @@ import (
// CommonSSH is a common function for ssh'ing to a podman machine using system-connections
// and a port
// TODO This should probably be taught about a machineconfig to reduce input
func CommonSSH(username, identityPath, name string, sshPort int, inputArgs []string) error {
sshDestination := username + "@localhost"
port := strconv.Itoa(sshPort)

View File

@ -0,0 +1,31 @@
package stdpull
import (
"os"
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/sirupsen/logrus"
)
type StdDiskPull struct {
finalPath *define.VMFile
inputPath *define.VMFile
}
func NewStdDiskPull(inputPath string, finalpath *define.VMFile) (*StdDiskPull, error) {
ip, err := define.NewMachineFile(inputPath, nil)
if err != nil {
return nil, err
}
return &StdDiskPull{inputPath: ip, finalPath: finalpath}, nil
}
func (s *StdDiskPull) Get() error {
if _, err := os.Stat(s.inputPath.GetPath()); err != nil {
// could not find disk
return err
}
logrus.Debugf("decompressing (if needed) %s to %s", s.inputPath.GetPath(), s.finalPath.GetPath())
return compression.Decompress(s.inputPath, s.finalPath.GetPath())
}
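
A hypothetical usage sketch (all paths are made up): decompressing a locally available disk image into its final location.

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine/define"
	"github.com/containers/podman/v4/pkg/machine/stdpull"
)

func main() {
	// Hypothetical final destination for the uncompressed disk image.
	final, err := define.NewMachineFile("/home/core/.local/share/containers/podman/machine/qemu/mymachine.qcow2", nil)
	if err != nil {
		log.Fatal(err)
	}
	puller, err := stdpull.NewStdDiskPull("/home/core/Downloads/disk.qcow2.xz", final)
	if err != nil {
		log.Fatal(err)
	}
	if err := puller.Get(); err != nil {
		log.Fatal(err)
	}
}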

pkg/machine/stdpull/url.go
View File

@ -0,0 +1,111 @@
package stdpull
import (
"errors"
"fmt"
"io"
"io/fs"
"net/http"
url2 "net/url"
"os"
"path"
"path/filepath"
"github.com/containers/podman/v4/pkg/machine/compression"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/utils"
"github.com/sirupsen/logrus"
)
type DiskFromURL struct {
u *url2.URL
finalPath *define.VMFile
tempLocation *define.VMFile
}
func NewDiskFromURL(inputPath string, finalPath *define.VMFile, tempDir *define.VMFile) (*DiskFromURL, error) {
var (
err error
)
u, err := url2.Parse(inputPath)
if err != nil {
return nil, err
}
// Make sure the temporary location exists before we get too deep
if _, err := os.Stat(tempDir.GetPath()); err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("temporary download directory %s does not exist", tempDir.GetPath())
}
}
remoteImageName := path.Base(inputPath)
if remoteImageName == "" {
return nil, fmt.Errorf("invalid url: unable to determine image name in %q", inputPath)
}
tempLocation, err := tempDir.AppendToNewVMFile(remoteImageName, nil)
if err != nil {
return nil, err
}
return &DiskFromURL{
u: u,
finalPath: finalPath,
tempLocation: tempLocation,
}, nil
}
func (d *DiskFromURL) Get() error {
// this fetches the image and writes it to the temporary location
if err := d.pull(); err != nil {
return err
}
logrus.Debugf("decompressing (if needed) %s to %s", d.tempLocation.GetPath(), d.finalPath.GetPath())
return compression.Decompress(d.tempLocation, d.finalPath.GetPath())
}
func (d *DiskFromURL) pull() error {
out, err := os.Create(d.tempLocation.GetPath())
if err != nil {
return err
}
defer func() {
if err := out.Close(); err != nil {
logrus.Error(err)
}
}()
resp, err := http.Get(d.u.String())
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil {
logrus.Error(err)
}
}()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("downloading VM image %s: %s", d.u.String(), resp.Status)
}
size := resp.ContentLength
prefix := "Downloading VM image: " + filepath.Base(d.tempLocation.GetPath())
onComplete := prefix + ": done"
p, bar := utils.ProgressBar(prefix, size, onComplete)
proxyReader := bar.ProxyReader(resp.Body)
defer func() {
if err := proxyReader.Close(); err != nil {
logrus.Error(err)
}
}()
if _, err := io.Copy(out, proxyReader); err != nil {
return err
}
p.Wait()
return nil
}
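
A hypothetical usage sketch (the URL and paths are made up): fetching a compressed image over HTTP with the progress bar, then decompressing it to its final path. NewDiskFromURL requires the temporary directory to exist already.

package main

import (
	"log"

	"github.com/containers/podman/v4/pkg/machine/define"
	"github.com/containers/podman/v4/pkg/machine/stdpull"
)

func main() {
	final, err := define.NewMachineFile("/home/core/.local/share/containers/podman/machine/qemu/mymachine.qcow2", nil)
	if err != nil {
		log.Fatal(err)
	}
	tmpDir, err := define.NewMachineFile("/tmp", nil)
	if err != nil {
		log.Fatal(err)
	}
	dl, err := stdpull.NewDiskFromURL("https://example.com/images/disk.qcow2.xz", final, tmpDir)
	if err != nil {
		log.Fatal(err)
	}
	if err := dl.Get(); err != nil {
		log.Fatal(err)
	}
}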

View File

@ -6,20 +6,21 @@ import (
"fmt"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/vmconfigs"
"github.com/sirupsen/logrus"
)
func UpdatePodmanDockerSockService(vm VM, name string, uid int, rootful bool) error {
content := ignition.GetPodmanDockerTmpConfig(uid, rootful, false)
func UpdatePodmanDockerSockService(mc *vmconfigs.MachineConfig) error {
content := ignition.GetPodmanDockerTmpConfig(mc.HostUser.UID, mc.HostUser.Rootful, false)
command := fmt.Sprintf("'echo %q > %s'", content, ignition.PodmanDockerTmpConfPath)
args := []string{"sudo", "bash", "-c", command}
if err := vm.SSH(name, SSHOptions{Args: args}); err != nil {
if err := CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args); err != nil {
logrus.Warnf("Could not not update internal docker sock config")
return err
}
args = []string{"sudo", "systemd-tmpfiles", "--create", "--prefix=/run/docker.sock"}
if err := vm.SSH(name, SSHOptions{Args: args}); err != nil {
if err := CommonSSH(mc.SSH.RemoteUsername, mc.SSH.IdentityPath, mc.Name, mc.SSH.Port, args); err != nil {
logrus.Warnf("Could not create internal docker sock")
return err
}

View File

@ -5,33 +5,34 @@ import (
"net/url"
"time"
"github.com/containers/common/pkg/strongunits"
gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/ignition"
"github.com/containers/podman/v4/pkg/machine/qemu/command"
"github.com/containers/storage/pkg/lockfile"
)
type aThing struct{}
type MachineConfig struct {
// Common stuff
Created time.Time
GvProxy gvproxy.GvproxyCommand
HostUser HostUser
IgnitionFile *aThing // possible interface
LastUp time.Time
LogPath *define.VMFile `json:",omitempty"` // Revisit this for all providers
Mounts []Mount
Name string
ReadySocket *aThing // possible interface
Resources ResourceConfig
SSH SSHConfig
Starting *bool
Version uint
Created time.Time
GvProxy gvproxy.GvproxyCommand
HostUser HostUser
LastUp time.Time
Mounts []*Mount
Name string
Resources ResourceConfig
SSH SSHConfig
Version uint
// Image stuff
imageDescription machineImage //nolint:unused
ImagePath *define.VMFile // Temporary only until a proper image struct is worked out
// Provider stuff
AppleHypervisor *AppleHVConfig `json:",omitempty"`
QEMUHypervisor *QEMUConfig `json:",omitempty"`
@ -39,11 +40,22 @@ type MachineConfig struct {
WSLHypervisor *WSLConfig `json:",omitempty"`
lock *lockfile.LockFile //nolint:unused
// configPath can be used for reading, writing, removing
configPath *define.VMFile
// used for deriving file, socket, etc locations
dirs *define.MachineDirs
// State
// Starting is defined as "on" but not fully booted
Starting bool
}
// MachineImage describes a podman machine image
type MachineImage struct {
OCI *ociMachineImage
OCI *OCIMachineImage
FCOS *fcosMachineImage
}
@ -63,7 +75,7 @@ type machineImage interface { //nolint:unused
path() string
}
type ociMachineImage struct {
type OCIMachineImage struct {
// registry
// TODO JSON serial/deserial will write string to disk
// but in code it is a types.ImageReference
@ -72,11 +84,11 @@ type ociMachineImage struct {
FQImageReference string
}
func (o ociMachineImage) path() string {
func (o OCIMachineImage) path() string {
return ""
}
func (o ociMachineImage) download() error {
func (o OCIMachineImage) download() error {
return nil
}
@ -94,6 +106,24 @@ func (f fcosMachineImage) path() string {
return ""
}
type VMProvider interface { //nolint:interfacebloat
CreateVM(opts define.CreateVMOpts, mc *MachineConfig, builder *ignition.IgnitionBuilder) error
PrepareIgnition(mc *MachineConfig, ignBuilder *ignition.IgnitionBuilder) (*ignition.ReadyUnitOpts, error)
GetHyperVisorVMs() ([]string, error)
MountType() VolumeMountType
MountVolumesToVM(mc *MachineConfig, quiet bool) error
Remove(mc *MachineConfig) ([]string, func() error, error)
RemoveAndCleanMachines(dirs *define.MachineDirs) error
SetProviderAttrs(mc *MachineConfig, cpus, memory *uint64, newDiskSize *strongunits.GiB, newRootful *bool) error
StartNetworking(mc *MachineConfig, cmd *gvproxy.GvproxyCommand) error
PostStartNetworking(mc *MachineConfig) error
StartVM(mc *MachineConfig) (func() error, func() error, error)
State(mc *MachineConfig, bypass bool) (define.Status, error)
StopVM(mc *MachineConfig, hardStop bool) error
StopHostNetworking(mc *MachineConfig, vmType define.VMType) error
VMType() define.VMType
}
// HostUser describes the host user
type HostUser struct {
// Whether this machine should run in a rootful or rootless manner
@ -105,11 +135,13 @@ type HostUser struct {
}
type Mount struct {
ReadOnly bool
Source string
Tag string
Target string
Type string
OriginalInput string
ReadOnly bool
Source string
Tag string
Target string
Type string
VSockNumber *uint64
}
// ResourceConfig describes physical attributes of the machine

View File

@ -1,6 +1,8 @@
package vmconfigs
import (
"os"
"github.com/containers/podman/v4/pkg/machine/applehv/vfkit"
)
@ -13,3 +15,7 @@ type AppleHVConfig struct {
type HyperVConfig struct{}
type WSLConfig struct{}
type QEMUConfig struct{}
func getHostUID() int {
return os.Getuid()
}

View File

@ -1,7 +1,13 @@
package vmconfigs
import "os"
// Stubs
type HyperVConfig struct{}
type WSLConfig struct {}
type QEMUConfig struct {}
type AppleHVConfig struct {}
type WSLConfig struct{}
type QEMUConfig struct{}
type AppleHVConfig struct{}
func getHostUID() int {
return os.Getuid()
}

View File

@ -1,14 +1,24 @@
package vmconfigs
import (
"os"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/qemu/command"
)
type QEMUConfig struct {
cmd command.QemuCmd //nolint:unused
// QMPMonitor is the qemu monitor object for sending commands
QMPMonitor command.Monitor
// QEMUPidPath is where to write the PID for QEMU when running
QEMUPidPath *define.VMFile
}
// Stubs
type AppleHVConfig struct{}
type HyperVConfig struct{}
type WSLConfig struct{}
func getHostUID() int {
return os.Getuid()
}

View File

@ -5,17 +5,21 @@ import (
)
type HyperVConfig struct {
// ReadyVSock is the pipeline for the guest to alert the host
// it is running
ReadyVsock vsock.HVSockRegistryEntry
// NetworkVSock is for the user networking
NetworkHVSock vsock.HVSockRegistryEntry
// MountVsocks contains the currently-active vsocks, mapped to the
// directory they should be mounted on.
MountVsocks map[string]uint64
NetworkVSock vsock.HVSockRegistryEntry
}
type WSLConfig struct {
wslstuff *aThing
//wslstuff *aThing
}
// Stubs
type QEMUConfig struct{}
type AppleHVConfig struct{}
func getHostUID() int {
return 1000
}

View File

@ -0,0 +1,347 @@
package vmconfigs
import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"time"
define2 "github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/machine/connection"
"github.com/containers/podman/v4/pkg/machine/define"
"github.com/containers/podman/v4/pkg/machine/lock"
"github.com/containers/podman/v4/utils"
"github.com/sirupsen/logrus"
)
/*
info Display machine host info common
init Initialize a virtual machine specific
inspect Inspect an existing machine specific
list List machines specific
os Manage a Podman virtual machine's OS common
rm Remove an existing machine specific
set Set a virtual machine setting specific
ssh SSH into an existing machine common
start Start an existing machine specific
stop Stop an existing machine specific
*/
var (
SSHRemoteConnection RemoteConnectionType = "ssh"
DefaultIgnitionUserName = "core"
ForwarderBinaryName = "gvproxy"
)
type RemoteConnectionType string
// NewMachineConfig creates the initial machine configuration file from cli options
func NewMachineConfig(opts define.InitOptions, dirs *define.MachineDirs, sshIdentityPath string) (*MachineConfig, error) {
mc := new(MachineConfig)
mc.Name = opts.Name
mc.dirs = dirs
machineLock, err := lock.GetMachineLock(opts.Name, dirs.ConfigDir.GetPath())
if err != nil {
return nil, err
}
mc.lock = machineLock
// Assign Dirs
cf, err := define.NewMachineFile(filepath.Join(dirs.ConfigDir.GetPath(), fmt.Sprintf("%s.json", opts.Name)), nil)
if err != nil {
return nil, err
}
mc.configPath = cf
// System Resources
mrc := ResourceConfig{
CPUs: opts.CPUS,
DiskSize: opts.DiskSize,
Memory: opts.Memory,
USBs: nil, // Needs to be filled in by providers?
}
mc.Resources = mrc
sshPort, err := utils.GetRandomPort()
if err != nil {
return nil, err
}
sshConfig := SSHConfig{
IdentityPath: sshIdentityPath,
Port: sshPort,
RemoteUsername: opts.Username,
}
mc.SSH = sshConfig
mc.Created = time.Now()
mc.HostUser = HostUser{UID: getHostUID(), Rootful: opts.Rootful}
return mc, nil
}
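// A hypothetical usage sketch (values are made up; dirs and the SSH identity path come
// from the caller):
//
//	opts := define.InitOptions{
//		Name:     "podman-machine-default",
//		CPUS:     4,
//		Memory:   4096,
//		DiskSize: 100,
//		Username: "core",
//		Rootful:  false,
//	}
//	mc, err := NewMachineConfig(opts, dirs, sshIdentityPath)
//	if err != nil {
//		return err
//	}
//	if err := mc.Write(); err != nil {
//		return err
//	}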
// Lock creates a lock on the machine for single access
func (mc *MachineConfig) Lock() {
mc.lock.Lock()
}
// Unlock removes an existing lock
func (mc *MachineConfig) Unlock() {
mc.lock.Unlock()
}
// Write is a locking way to write the machine configuration file to disk
func (mc *MachineConfig) Write() error {
mc.Lock()
defer mc.Unlock()
return mc.write()
}
// Refresh reloads the config file from disk
func (mc *MachineConfig) Refresh() error {
content, err := os.ReadFile(mc.configPath.GetPath())
if err != nil {
return err
}
return json.Unmarshal(content, mc)
}
// write is a non-locking way to write the machine configuration file to disk
func (mc *MachineConfig) write() error {
if mc.configPath == nil {
return fmt.Errorf("no configuration file associated with vm %q", mc.Name)
}
b, err := json.Marshal(mc)
if err != nil {
return err
}
logrus.Debugf("writing configuration file %q", mc.configPath.Path)
return os.WriteFile(mc.configPath.GetPath(), b, define.DefaultFilePerm)
}
func (mc *MachineConfig) SetRootful(rootful bool) error {
if err := connection.UpdateConnectionIfDefault(rootful, mc.Name, mc.Name+"-root"); err != nil {
return err
}
mc.HostUser.Rootful = rootful
mc.HostUser.Modified = true
return nil
}
func (mc *MachineConfig) removeSystemConnection() error { //nolint:unused
return define2.ErrNotImplemented
}
// updateLastBoot writes the current time to the machine configuration file. It is
// a non-locking method and assumes the caller already holds the machine lock.
func (mc *MachineConfig) updateLastBoot() error { //nolint:unused
	mc.LastUp = time.Now()
	return mc.write()
}
func (mc *MachineConfig) Remove(saveIgnition, saveImage bool) ([]string, func() error, error) {
ignitionFile, err := mc.IgnitionFile()
if err != nil {
return nil, nil, err
}
readySocket, err := mc.ReadySocket()
if err != nil {
return nil, nil, err
}
logPath, err := mc.LogFile()
if err != nil {
return nil, nil, err
}
rmFiles := []string{
mc.configPath.GetPath(),
readySocket.GetPath(),
logPath.GetPath(),
}
	if !saveImage {
		rmFiles = append(rmFiles, mc.ImagePath.GetPath())
	}
	if !saveIgnition {
		rmFiles = append(rmFiles, ignitionFile.GetPath())
	}
mcRemove := func() error {
if !saveIgnition {
if err := ignitionFile.Delete(); err != nil {
logrus.Error(err)
}
}
if !saveImage {
if err := mc.ImagePath.Delete(); err != nil {
logrus.Error(err)
}
}
if err := mc.configPath.Delete(); err != nil {
logrus.Error(err)
}
if err := readySocket.Delete(); err != nil {
			logrus.Error(err)
}
if err := logPath.Delete(); err != nil {
logrus.Error(err)
}
		// TODO This should be bumped up into delete and called out in the text, given these
		// are not technically files per se
return connection.RemoveConnections(mc.Name, mc.Name+"-root")
}
return rmFiles, mcRemove, nil
}
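// A hypothetical caller sketch (not part of this PR): present rmFiles to the user for
// confirmation and only then invoke the returned closure:
//
//	rmFiles, doRemove, err := mc.Remove(saveIgnition, saveImage)
//	if err != nil {
//		return err
//	}
//	fmt.Println("The following files will be deleted:", strings.Join(rmFiles, "\n"))
//	// ... prompt the user ...
//	return doRemove()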
// ConfigDir is a simple helper to obtain the machine config dir
func (mc *MachineConfig) ConfigDir() (*define.VMFile, error) {
if mc.dirs == nil || mc.dirs.ConfigDir == nil {
return nil, errors.New("no configuration directory set")
}
return mc.dirs.ConfigDir, nil
}
// DataDir is a simple helper function to obtain the machine data dir
func (mc *MachineConfig) DataDir() (*define.VMFile, error) {
if mc.dirs == nil || mc.dirs.DataDir == nil {
return nil, errors.New("no data directory set")
}
return mc.dirs.DataDir, nil
}
// RuntimeDir is simple helper function to obtain the runtime dir
func (mc *MachineConfig) RuntimeDir() (*define.VMFile, error) {
if mc.dirs == nil || mc.dirs.RuntimeDir == nil {
return nil, errors.New("no runtime directory set")
}
return mc.dirs.RuntimeDir, nil
}
func (mc *MachineConfig) SetDirs(dirs *define.MachineDirs) {
mc.dirs = dirs
}
func (mc *MachineConfig) IgnitionFile() (*define.VMFile, error) {
configDir, err := mc.ConfigDir()
if err != nil {
return nil, err
}
return configDir.AppendToNewVMFile(mc.Name+".ign", nil)
}
func (mc *MachineConfig) ReadySocket() (*define.VMFile, error) {
rtDir, err := mc.RuntimeDir()
if err != nil {
return nil, err
}
return readySocket(mc.Name, rtDir)
}
func (mc *MachineConfig) GVProxySocket() (*define.VMFile, error) {
machineRuntimeDir, err := mc.RuntimeDir()
if err != nil {
return nil, err
}
return gvProxySocket(mc.Name, machineRuntimeDir)
}
func (mc *MachineConfig) LogFile() (*define.VMFile, error) {
rtDir, err := mc.RuntimeDir()
if err != nil {
return nil, err
}
return rtDir.AppendToNewVMFile(mc.Name+".log", nil)
}
func (mc *MachineConfig) Kind() (define.VMType, error) {
// Not super in love with this approach
if mc.QEMUHypervisor != nil {
return define.QemuVirt, nil
}
if mc.AppleHypervisor != nil {
return define.AppleHvVirt, nil
}
if mc.HyperVHypervisor != nil {
return define.HyperVVirt, nil
}
if mc.WSLHypervisor != nil {
return define.WSLVirt, nil
}
return define.UnknownVirt, nil
}
func (mc *MachineConfig) IsFirstBoot() (bool, error) {
never, err := time.Parse(time.RFC3339, "0001-01-01T00:00:00Z")
if err != nil {
return false, err
}
return mc.LastUp == never, nil
}
// LoadMachineByName returns a machine config based on the vm name and provider
func LoadMachineByName(name string, dirs *define.MachineDirs) (*MachineConfig, error) {
fullPath, err := dirs.ConfigDir.AppendToNewVMFile(name+".json", nil)
if err != nil {
return nil, err
}
mc, err := loadMachineFromFQPath(fullPath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, &define.ErrVMDoesNotExist{Name: name}
}
return nil, err
}
mc.dirs = dirs
mc.configPath = fullPath
return mc, nil
}
// loadMachineFromFQPath is a stub function for loading a JSON configuration file and returning
// a machineconfig. This should only be called if you know what you are doing.
func loadMachineFromFQPath(path *define.VMFile) (*MachineConfig, error) {
mc := new(MachineConfig)
b, err := path.Read()
if err != nil {
return nil, err
}
if err = json.Unmarshal(b, mc); err != nil {
return nil, fmt.Errorf("unable to load machine config file: %q", err)
}
	machineLock, err := lock.GetMachineLock(mc.Name, filepath.Dir(path.GetPath()))
	mc.lock = machineLock
return mc, err
}
// LoadMachinesInDir returns all the machineconfigs located in given dir
func LoadMachinesInDir(dirs *define.MachineDirs) (map[string]*MachineConfig, error) {
mcs := make(map[string]*MachineConfig)
	if err := filepath.WalkDir(dirs.ConfigDir.GetPath(), func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if strings.HasSuffix(d.Name(), ".json") {
fullPath, err := dirs.ConfigDir.AppendToNewVMFile(d.Name(), nil)
if err != nil {
return err
}
mc, err := loadMachineFromFQPath(fullPath)
if err != nil {
return err
}
mc.configPath = fullPath
mc.dirs = dirs
mcs[mc.Name] = mc
}
return nil
}); err != nil {
return nil, err
}
return mcs, nil
}
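
A hypothetical usage sketch (the dirs value is assumed to come from the usual machine-dirs helper): listing every machine configuration found in a provider's config directory.

package example

import (
	"fmt"
	"time"

	"github.com/containers/podman/v4/pkg/machine/define"
	"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)

// listMachines is a hypothetical helper, not part of this PR.
func listMachines(dirs *define.MachineDirs) error {
	mcs, err := vmconfigs.LoadMachinesInDir(dirs)
	if err != nil {
		return err
	}
	for name, mc := range mcs {
		fmt.Printf("%s\trootful=%v\tcreated=%s\n", name, mc.HostUser.Rootful, mc.Created.Format(time.RFC3339))
	}
	return nil
}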

View File

@ -0,0 +1,17 @@
//go:build !darwin
package vmconfigs
import (
"fmt"
"github.com/containers/podman/v4/pkg/machine/define"
)
func gvProxySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, error) {
return machineRuntimeDir.AppendToNewVMFile(fmt.Sprintf("%s-gvproxy.sock", name), nil)
}
func readySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, error) {
return machineRuntimeDir.AppendToNewVMFile(name+".sock", nil)
}

View File

@ -0,0 +1,17 @@
package vmconfigs
import (
"fmt"
"github.com/containers/podman/v4/pkg/machine/define"
)
func gvProxySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, error) {
socketName := fmt.Sprintf("%s-gvproxy.sock", name)
return machineRuntimeDir.AppendToNewVMFile(socketName, &socketName)
}
func readySocket(name string, machineRuntimeDir *define.VMFile) (*define.VMFile, error) {
socketName := name + ".sock"
return machineRuntimeDir.AppendToNewVMFile(socketName, &socketName)
}

View File

@ -0,0 +1,60 @@
package vmconfigs
import (
"fmt"
"strings"
)
type VolumeMountType int
const (
NineP VolumeMountType = iota
VirtIOFS
Unknown
)
func (v VolumeMountType) String() string {
switch v {
case NineP:
return "9p"
case VirtIOFS:
return "virtiofs"
default:
return "unknown"
}
}
func extractSourcePath(paths []string) string {
return paths[0]
}
func extractMountOptions(paths []string) (bool, string) {
readonly := false
securityModel := "none"
if len(paths) > 2 {
options := paths[2]
volopts := strings.Split(options, ",")
for _, o := range volopts {
switch {
case o == "rw":
readonly = false
case o == "ro":
readonly = true
case strings.HasPrefix(o, "security_model="):
securityModel = strings.Split(o, "=")[1]
default:
fmt.Printf("Unknown option: %s\n", o)
}
}
}
return readonly, securityModel
}
func SplitVolume(idx int, volume string) (string, string, string, bool, string) {
tag := fmt.Sprintf("vol%d", idx)
paths := pathsFromVolume(volume)
source := extractSourcePath(paths)
target := extractTargetPath(paths)
readonly, securityModel := extractMountOptions(paths)
return tag, source, target, readonly, securityModel
}
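
A worked example on a Unix host (the volume string itself is hypothetical): SplitVolume derives the tag from the index and parses the source, target, and mount options.

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/machine/vmconfigs"
)

func main() {
	tag, source, target, readOnly, securityModel := vmconfigs.SplitVolume(0, "/Users/core/src:/mnt/src:ro,security_model=none")
	fmt.Println(tag, source, target, readOnly, securityModel)
	// Expected output: vol0 /Users/core/src /mnt/src true none
}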

View File

@ -0,0 +1,16 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
package vmconfigs
import "strings"
func pathsFromVolume(volume string) []string {
return strings.SplitN(volume, ":", 3)
}
func extractTargetPath(paths []string) string {
if len(paths) > 1 {
return paths[1]
}
return paths[0]
}

Some files were not shown because too many files have changed in this diff