Merge pull request #20050 from umohnani8/farm-build-2
Add podman farm build command
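As a rough sketch of how the new command is meant to be invoked (flags taken from the cobra definition added in this commit; the image name and registry below are placeholders): --tag is required because the farm build produces a manifest list, and the single positional argument is the build context directory.

    # hypothetical invocation; image name and registry are placeholders
    podman farm build --tag quay.io/example/app:latest .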
.cirrus.yml (19 lines changed)

@@ -910,6 +910,24 @@ minikube_test_task:
     main_script: *main
     always: *logs_artifacts
 
+farm_test_task:
+    name: *std_name_fmt
+    alias: farm_test
+    # Docs: ./contrib/cirrus/CIModes.md
+    only_if: *not_tag_build_docs
+    depends_on:
+        - build
+        - rootless_system_test
+    gce_instance: *standardvm
+    env:
+        <<: *stdenvars
+        TEST_FLAVOR: farm
+        PRIV_NAME: rootless
+    clone_script: *get_gosrc
+    setup_script: *setup
+    main_script: *main
+    always: *logs_artifacts
+
 buildah_bud_test_task:
     name: *std_name_fmt
     alias: buildah_bud_test

@@ -1054,6 +1072,7 @@ success_task:
         - rootless_system_test
        - rootless_remote_system_test
         - minikube_test
+        - farm_test
         - buildah_bud_test
         - rootless_buildah_bud_test
         - upgrade_test
@@ -46,7 +46,13 @@ type BuildFlagsWrapper struct {
     Cleanup bool
 }
 
-func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper) {
+// FarmBuildHiddenFlags are the flags hidden from the farm build command because they are either not
+// supported or don't make sense in the farm build use case
+var FarmBuildHiddenFlags = []string{"arch", "all-platforms", "compress", "cw", "disable-content-trust",
+    "logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin", "tls-verify",
+    "variant"}
+
+func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBuild bool) {
     flags := cmd.Flags()
 
     // buildx build --load ignored, but added for compliance

@@ -116,6 +122,11 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper) {
         _ = flags.MarkHidden("logsplit")
         _ = flags.MarkHidden("cw")
     }
+    if isFarmBuild {
+        for _, f := range FarmBuildHiddenFlags {
+            _ = flags.MarkHidden(f)
+        }
+    }
 }
 
 func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrapper) (*entities.BuildOptions, error) {
cmd/podman/farm/build.go (new file, 135 lines)

@@ -0,0 +1,135 @@
package farm

import (
    "errors"
    "fmt"
    "os"

    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/cmd/podman/common"
    "github.com/containers/podman/v4/cmd/podman/registry"
    "github.com/containers/podman/v4/cmd/podman/utils"
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/farm"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

type buildOptions struct {
    buildOptions common.BuildFlagsWrapper
    local        bool
    platforms    []string
}

var (
    farmBuildDescription = `Build images on farm nodes, then bundle them into a manifest list`
    buildCommand         = &cobra.Command{
        Use:     "build [options] [CONTEXT]",
        Short:   "Build a container image for multiple architectures",
        Long:    farmBuildDescription,
        RunE:    build,
        Example: "podman farm build [flags] buildContextDirectory",
        Args:    cobra.ExactArgs(1),
    }
    buildOpts = buildOptions{
        buildOptions: common.BuildFlagsWrapper{},
    }
)

func init() {
    registry.Commands = append(registry.Commands, registry.CliCommand{
        Command: buildCommand,
        Parent:  farmCmd,
    })
    flags := buildCommand.Flags()
    flags.SetNormalizeFunc(utils.AliasFlags)

    localFlagName := "local"
    // Default for local is true and hide this flag for the remote use case
    if !registry.IsRemote() {
        flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
    }
    cleanupFlag := "cleanup"
    flags.BoolVar(&buildOpts.buildOptions.Cleanup, cleanupFlag, false, "Remove built images from farm nodes on success")
    platformsFlag := "platforms"
    buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")

    common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
}

func build(cmd *cobra.Command, args []string) error {
    // Return error if any of the hidden flags are used
    for _, f := range common.FarmBuildHiddenFlags {
        if cmd.Flags().Changed(f) {
            return fmt.Errorf("%q is an unsupported flag for podman farm build", f)
        }
    }

    if !cmd.Flags().Changed("tag") {
        return errors.New("cannot create manifest list without a name, value for --tag is required")
    }
    opts, err := common.ParseBuildOpts(cmd, args, &buildOpts.buildOptions)
    if err != nil {
        return err
    }
    // Close the logFile if one was created based on the flag
    if opts.LogFileToClose != nil {
        defer opts.LogFileToClose.Close()
    }
    if opts.TmpDirToClose != "" {
        // We had to download the context directory.
        // Delete it later.
        defer func() {
            if err = os.RemoveAll(opts.TmpDirToClose); err != nil {
                logrus.Errorf("Removing temporary directory %q: %v", opts.TmpDirToClose, err)
            }
        }()
    }
    opts.Cleanup = buildOpts.buildOptions.Cleanup
    iidFile, err := cmd.Flags().GetString("iidfile")
    if err != nil {
        return err
    }
    opts.IIDFile = iidFile

    cfg, err := config.ReadCustomConfig()
    if err != nil {
        return err
    }

    defaultFarm := cfg.Farms.Default
    if farmCmd.Flags().Changed("farm") {
        f, err := farmCmd.Flags().GetString("farm")
        if err != nil {
            return err
        }
        defaultFarm = f
    }

    var localEngine entities.ImageEngine
    if buildOpts.local {
        localEngine = registry.ImageEngine()
    }

    ctx := registry.Context()
    farm, err := farm.NewFarm(ctx, defaultFarm, localEngine)
    if err != nil {
        return fmt.Errorf("initializing: %w", err)
    }

    schedule, err := farm.Schedule(ctx, buildOpts.platforms)
    if err != nil {
        return fmt.Errorf("scheduling builds: %w", err)
    }
    logrus.Infof("schedule: %v", schedule)

    manifestName := opts.Output
    // Set Output to "" so that the images built on the farm nodes have no name
    opts.Output = ""
    if err = farm.Build(ctx, schedule, *opts, manifestName); err != nil {
        return fmt.Errorf("build: %w", err)
    }
    logrus.Infof("build: ok")

    return nil
}
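A rough usage sketch based on the wiring above (names are placeholders, not taken from the source): the farm to build on defaults to cfg.Farms.Default from containers.conf and can be overridden through the farm command's -f/--farm flag, --platforms restricts scheduling to matching nodes, --cleanup removes the node-side images on success, and any flag listed in FarmBuildHiddenFlags (for example --arch) is rejected with an "unsupported flag" error.

    # hypothetical examples; image name and registry are placeholders
    podman farm build --tag quay.io/example/app:latest --platforms linux/amd64,linux/arm64 --cleanup .
    podman farm build --arch arm64 --tag quay.io/example/app:latest .   # rejected: "arch" is an unsupported flag for podman farm build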
@@ -20,7 +20,6 @@ var (
     // Temporary struct to hold cli values.
     farmOpts = struct {
         Farm string
-        Local bool
     }{}
 )

@@ -40,10 +39,4 @@ func init() {
         defaultFarm = podmanConfig.ContainersConfDefaultsRO.Farms.Default
     }
     flags.StringVarP(&farmOpts.Farm, farmFlagName, "f", defaultFarm, "Farm to use for builds")
-
-    localFlagName := "local"
-    // Default for local is true and hide this flag for the remote use case
-    if !registry.IsRemote() {
-        flags.BoolVarP(&farmOpts.Local, localFlagName, "l", true, "Build image on local machine including on farm nodes")
-    }
 }
@@ -74,7 +74,7 @@ func init() {
 }
 
 func buildFlags(cmd *cobra.Command) {
-    common.DefineBuildFlags(cmd, &buildOpts)
+    common.DefineBuildFlags(cmd, &buildOpts, false)
 }
 
 // build executes the build command.
@@ -137,7 +137,11 @@ setup_rootless() {
     # shellcheck disable=SC2154
     if passwd --status $ROOTLESS_USER
     then
-        if [[ $PRIV_NAME = "rootless" ]]; then
+        # Farm tests utilize the rootless user to simulate a "remote" podman instance.
+        # Root still needs to own the repo. clone and all things under `$GOPATH`. The
+        # opposite is true for the lower-level podman e2e tests, the rootless user
+        # runs them, and therefore needs permissions.
+        if [[ $PRIV_NAME = "rootless" ]] && [[ "$TEST_FLAVOR" != "farm" ]]; then
             msg "Updating $ROOTLESS_USER user permissions on possibly changed libpod code"
             chown -R $ROOTLESS_USER:$ROOTLESS_USER "$GOPATH" "$GOSRC"
             return 0

@@ -184,6 +188,13 @@ setup_rootless() {
     # Maintain access-permission consistency with all other .ssh files.
     install -Z -m 700 -o $ROOTLESS_USER -g $ROOTLESS_USER \
         /root/.ssh/known_hosts /home/$ROOTLESS_USER/.ssh/known_hosts
+
+    if [[ -n "$ROOTLESS_USER" ]]; then
+        showrun echo "conditional setup for ROOTLESS_USER [=$ROOTLESS_USER]"
+        # Make all future CI scripts aware of these values
+        echo "ROOTLESS_USER=$ROOTLESS_USER" >> /etc/ci_environment
+        echo "ROOTLESS_UID=$ROOTLESS_UID" >> /etc/ci_environment
+    fi
 }
 
 install_test_configs() {
@@ -126,6 +126,12 @@ function _run_minikube() {
     showrun bats test/minikube |& logformatter
 }
 
+function _run_farm() {
+    _bail_if_test_can_be_skipped test/farm test/system
+    msg "Testing podman farm."
+    showrun bats test/farm |& logformatter
+}
+
 exec_container() {
     local var_val
     local cmd
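The new _run_farm runner above simply drives the BATS suite under test/farm. Something like the following, run as the rootless user on a host with the user podman socket enabled (the same prerequisites the CI setup below puts in place), should approximate the CI flavor locally; the exact environment wiring is CI-specific and this is only a sketch.

    # approximate local equivalent of the CI "farm" test flavor
    systemctl --user enable --now podman.socket
    bats test/farm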
@@ -269,13 +269,6 @@ case "$PRIV_NAME" in
     *) die_unknown PRIV_NAME
 esac
 
-# shellcheck disable=SC2154
-if [[ -n "$ROOTLESS_USER" ]]; then
-    showrun echo "conditional setup for ROOTLESS_USER [=$ROOTLESS_USER]"
-    echo "ROOTLESS_USER=$ROOTLESS_USER" >> /etc/ci_environment
-    echo "ROOTLESS_UID=$ROOTLESS_UID" >> /etc/ci_environment
-fi
-
 # FIXME! experimental workaround for #16973, the "lookup cdn03.quay.io" flake.
 #
 # If you are reading this on or after April 2023:

@@ -403,6 +396,13 @@ case "$TEST_FLAVOR" in
             die "Invalid value for \$TEST_ENVIRON=$TEST_ENVIRON"
         fi
 
+        install_test_configs
+        ;;
+    farm)
+        showrun loginctl enable-linger $ROOTLESS_USER
+        showrun ssh $ROOTLESS_USER@localhost systemctl --user enable --now podman.socket
+        remove_packaged_podman_files
+        showrun make install PREFIX=/usr ETCDIR=/etc
         install_test_configs
         ;;
     minikube)
@@ -62,6 +62,7 @@ type HostInfo struct {
     SwapFree  int64  `json:"swapFree"`
     SwapTotal int64  `json:"swapTotal"`
     Uptime    string `json:"uptime"`
+    Variant   string `json:"variant"`
     Linkmode  string `json:"linkmode"`
 }
 
@@ -16,6 +16,7 @@ import (
     "time"
 
     "github.com/containers/buildah"
+    "github.com/containers/buildah/pkg/parse"
     "github.com/containers/buildah/pkg/util"
     "github.com/containers/common/pkg/version"
     "github.com/containers/image/v5/pkg/sysregistriesv2"

@@ -130,6 +131,11 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
         SwapFree:  mi.SwapFree,
         SwapTotal: mi.SwapTotal,
     }
+    platform := parse.DefaultPlatform()
+    pArr := strings.Split(platform, "/")
+    if len(pArr) == 3 {
+        info.Variant = pArr[2]
+    }
     if err := r.setPlatformHostInfo(&info); err != nil {
         return nil, err
     }
@@ -19,6 +19,7 @@ import (
 
     "github.com/containers/buildah/define"
     "github.com/containers/image/v5/types"
+    ldefine "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/pkg/auth"
     "github.com/containers/podman/v4/pkg/bindings"
     "github.com/containers/podman/v4/pkg/domain/entities"

@@ -500,6 +501,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
         }
     }
 
+    saveFormat := ldefine.OCIArchive
+    if options.OutputFormat == define.Dockerv2ImageManifest {
+        saveFormat = ldefine.V2s2Archive
+    }
+
     // build secrets are usually absolute host path or relative to context dir on host
     // in any case move secret to current context and ship the tar.
     if secrets := options.CommonBuildOpts.Secrets; len(secrets) > 0 {

@@ -602,7 +608,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
         // even when the server quit but it seems desirable to
         // distinguish a proper build from a transient EOF.
         case <-response.Request.Context().Done():
-            return &entities.BuildReport{ID: id}, nil
+            return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
         default:
             // non-blocking select
         }

@@ -616,7 +622,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
         if errors.Is(err, io.EOF) && id != "" {
             break
         }
-        return &entities.BuildReport{ID: id}, fmt.Errorf("decoding stream: %w", err)
+        return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, fmt.Errorf("decoding stream: %w", err)
     }
 
     switch {

@@ -629,12 +635,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
         case s.Error != "":
             // If there's an error, return directly. The stream
             // will be closed on return.
-            return &entities.BuildReport{ID: id}, errors.New(s.Error)
+            return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New(s.Error)
         default:
-            return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input")
+            return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New("failed to parse build results stream, unexpected input")
         }
     }
-    return &entities.BuildReport{ID: id}, nil
+    return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
 }
 
 func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
@@ -9,6 +9,7 @@ import (
     "time"
 
     podmanRegistry "github.com/containers/podman/v4/hack/podman-registry-go"
+    "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/pkg/bindings"
     "github.com/containers/podman/v4/pkg/bindings/containers"
     "github.com/containers/podman/v4/pkg/bindings/images"

@@ -410,6 +411,7 @@ var _ = Describe("Podman images", func() {
         Expect(err).ToNot(HaveOccurred())
         Expect(*results).To(MatchFields(IgnoreMissing, Fields{
             "ID":         Not(BeEmpty()),
+            "SaveFormat": ContainSubstring(define.OCIArchive),
         }))
     })
 })
@@ -50,6 +50,7 @@ type PodmanConfig struct {
     Syslog bool   // write logging information to syslog as well as the console
     Trace  bool   // Hidden: Trace execution
     URI    string // URI to RESTful API Service
+    FarmNodeName string // Name of farm node
 
     Runroot    string
     ImageStore string
@@ -40,5 +40,11 @@ type ImageEngine interface { //nolint:interfacebloat
     ManifestRemoveDigest(ctx context.Context, names, image string) (string, error)
     ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error)
     ManifestPush(ctx context.Context, name, destination string, imagePushOpts ImagePushOptions) (string, error)
+    ManifestListClear(ctx context.Context, name string) (string, error)
     Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error)
+    FarmNodeName(ctx context.Context) string
+    FarmNodeDriver(ctx context.Context) string
+    FarmNodeInspect(ctx context.Context) (*FarmInspectReport, error)
+    PullToFile(ctx context.Context, options PullToFileOptions) (string, error)
+    PullToLocal(ctx context.Context, options PullToLocalOptions) (string, error)
 }
@@ -478,3 +478,33 @@ type ImageUnmountReport struct {
     Err error
     Id  string //nolint:revive,stylecheck
 }
+
+const (
+    LocalFarmImageBuilderName   = "(local)"
+    LocalFarmImageBuilderDriver = "local"
+)
+
+// FarmInspectReport describes the response from farm inspect
+type FarmInspectReport struct {
+    NativePlatforms   []string
+    EmulatedPlatforms []string
+    OS                string
+    Arch              string
+    Variant           string
+}
+
+// PullToFileOptions are the options for pulling the images from farm
+// nodes into a dir
+type PullToFileOptions struct {
+    ImageID    string
+    SaveFormat string
+    SaveFile   string
+}
+
+// PullToLocalOptions are the options for pulling the images from farm
+// nodes into containers-storage
+type PullToLocalOptions struct {
+    ImageID     string
+    SaveFormat  string
+    Destination ImageEngine
+}
@@ -112,6 +112,7 @@ type ContainerCreateResponse struct {
 type BuildOptions struct {
     buildahDefine.BuildOptions
     ContainerFiles []string
+    FarmBuildOptions
     // Files that need to be closed after the build
     // so need to pass this to the main build functions
     LogFileToClose *os.File

@@ -122,6 +123,14 @@ type BuildOptions struct {
 type BuildReport struct {
     // ID of the image.
     ID string
+    // Format to save the image in
+    SaveFormat string
+}
+
+// FarmBuildOptions describes the options for building container images on farm nodes
+type FarmBuildOptions struct {
+    // Cleanup removes built images from farm nodes on success
+    Cleanup bool
 }
 
 type IDOrNameResponse struct {
pkg/domain/infra/abi/farm.go (new file, 120 lines)

@@ -0,0 +1,120 @@
//go:build !remote
// +build !remote

package abi

import (
    "context"
    "fmt"
    "os"
    "strings"

    "github.com/containers/buildah/pkg/parse"
    lplatform "github.com/containers/common/libimage/platform"
    istorage "github.com/containers/image/v5/storage"
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/emulation"
)

// FarmNodeName returns the local engine's name.
func (ir *ImageEngine) FarmNodeName(ctx context.Context) string {
    return entities.LocalFarmImageBuilderName
}

// FarmNodeDriver returns a description of the local image builder driver
func (ir *ImageEngine) FarmNodeDriver(ctx context.Context) string {
    return entities.LocalFarmImageBuilderDriver
}

func (ir *ImageEngine) fetchInfo(_ context.Context) (os, arch, variant string, nativePlatforms []string, emulatedPlatforms []string, err error) {
    nativePlatform := parse.DefaultPlatform()
    platform := strings.SplitN(nativePlatform, "/", 3)
    switch len(platform) {
    case 0, 1:
        return "", "", "", nil, nil, fmt.Errorf("unparseable default platform %q", nativePlatform)
    case 2:
        os, arch = platform[0], platform[1]
    case 3:
        os, arch, variant = platform[0], platform[1], platform[2]
    }
    os, arch, variant = lplatform.Normalize(os, arch, variant)
    nativePlatform = os + "/" + arch
    if variant != "" {
        nativePlatform += ("/" + variant)
    }
    emulatedPlatforms = emulation.Registered()
    return os, arch, variant, append([]string{}, nativePlatform), emulatedPlatforms, nil
}

// FarmNodeInspect returns information about the remote engines in the farm
func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspectReport, error) {
    ir.platforms.Do(func() {
        ir.os, ir.arch, ir.variant, ir.nativePlatforms, ir.emulatedPlatforms, ir.platformsErr = ir.fetchInfo(ctx)
    })
    return &entities.FarmInspectReport{NativePlatforms: ir.nativePlatforms,
        EmulatedPlatforms: ir.emulatedPlatforms,
        OS:                ir.os,
        Arch:              ir.arch,
        Variant:           ir.variant}, ir.platformsErr
}

// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
    saveOptions := entities.ImageSaveOptions{
        Format: options.SaveFormat,
        Output: options.SaveFile,
    }
    if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
        return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
    }
    return options.SaveFormat + ":" + options.SaveFile, nil
}

// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
    destination := options.Destination
    if destination == nil {
        return "", fmt.Errorf("destination not given, cannot pull image %q", options.ImageID)
    }

    // Check if the image is already present at destination
    var br *entities.BoolReport
    br, err = destination.Exists(ctx, options.ImageID)
    if err != nil {
        return "", err
    }
    if br.Value {
        return istorage.Transport.Name() + ":" + options.ImageID, nil
    }

    tempFile, err := os.CreateTemp("", "")
    if err != nil {
        return "", err
    }
    defer os.Remove(tempFile.Name())
    defer tempFile.Close()

    saveOptions := entities.ImageSaveOptions{
        Format: options.SaveFormat,
        Output: tempFile.Name(),
    }
    // Save image built on builder in a temp file
    if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
        return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
    }

    // Load the image saved in tempFile into the local engine
    loadOptions := entities.ImageLoadOptions{
        Input: tempFile.Name(),
    }

    _, err = destination.Load(ctx, loadOptions)
    if err != nil {
        return "", err
    }

    return istorage.Transport.Name() + ":" + options.ImageID, nil
}
@@ -15,6 +15,7 @@ import (
     "strings"
     "syscall"
 
+    bdefine "github.com/containers/buildah/define"
     "github.com/containers/common/libimage"
     "github.com/containers/common/libimage/filter"
     "github.com/containers/common/pkg/config"

@@ -524,7 +525,11 @@ func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts
     if err != nil {
         return nil, err
     }
-    return &entities.BuildReport{ID: id}, nil
+    saveFormat := define.OCIArchive
+    if opts.OutputFormat == bdefine.Dockerv2ImageManifest {
+        saveFormat = define.V2s2Archive
+    }
+    return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
 }
 
 func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
@@ -392,3 +392,24 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
 
     return manDigest.String(), err
 }
+
+// ManifestListClear clears out all instances from the manifest list
+func (ir *ImageEngine) ManifestListClear(ctx context.Context, name string) (string, error) {
+    manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
+    if err != nil {
+        return "", err
+    }
+
+    listContents, err := manifestList.Inspect()
+    if err != nil {
+        return "", err
+    }
+
+    for _, instance := range listContents.Manifests {
+        if err := manifestList.RemoveInstance(instance.Digest); err != nil {
+            return "", err
+        }
+    }
+
+    return manifestList.ID(), nil
+}
@@ -9,6 +9,7 @@ import (
 // Image-related runtime linked against libpod library
 type ImageEngine struct {
     Libpod *libpod.Runtime
+    FarmNode
 }
 
 // Container-related runtime linked against libpod library

@@ -21,4 +22,14 @@ type SystemEngine struct {
     Libpod *libpod.Runtime
 }
 
+type FarmNode struct {
+    platforms         sync.Once
+    platformsErr      error
+    os                string
+    arch              string
+    variant           string
+    nativePlatforms   []string
+    emulatedPlatforms []string
+}
+
 var shutdownSync sync.Once
@@ -39,7 +39,7 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
         if err != nil {
             return nil, fmt.Errorf("%w: %s", err, facts.URI)
         }
-        return &tunnel.ImageEngine{ClientCtx: ctx}, nil
+        return &tunnel.ImageEngine{ClientCtx: ctx, FarmNode: tunnel.FarmNode{NodeName: facts.FarmNodeName}}, nil
     }
     return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
 }
@@ -18,11 +18,12 @@ var (
     connection *context.Context
 )
 
-func newConnection(uri string, identity string, machine bool) (context.Context, error) {
+func newConnection(uri string, identity, farmNodeName string, machine bool) (context.Context, error) {
     connectionMutex.Lock()
     defer connectionMutex.Unlock()
 
-    if connection == nil {
+    // if farmNodeName given, then create a connection with the node so that we can send builds there
+    if connection == nil || farmNodeName != "" {
         ctx, err := bindings.NewConnectionWithIdentity(context.Background(), uri, identity, machine)
         if err != nil {
             return ctx, err

@@ -37,7 +38,7 @@ func NewContainerEngine(facts *entities.PodmanConfig) (entities.ContainerEngine,
     case entities.ABIMode:
         return nil, fmt.Errorf("direct runtime not supported")
     case entities.TunnelMode:
-        ctx, err := newConnection(facts.URI, facts.Identity, facts.MachineMode)
+        ctx, err := newConnection(facts.URI, facts.Identity, "", facts.MachineMode)
         return &tunnel.ContainerEngine{ClientCtx: ctx}, err
     }
     return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)

@@ -49,8 +50,8 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
     case entities.ABIMode:
         return nil, fmt.Errorf("direct image runtime not supported")
     case entities.TunnelMode:
-        ctx, err := newConnection(facts.URI, facts.Identity, facts.MachineMode)
-        return &tunnel.ImageEngine{ClientCtx: ctx}, err
+        ctx, err := newConnection(facts.URI, facts.Identity, facts.FarmNodeName, facts.MachineMode)
+        return &tunnel.ImageEngine{ClientCtx: ctx, FarmNode: tunnel.FarmNode{NodeName: facts.FarmNodeName}}, err
     }
     return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
 }
pkg/domain/infra/tunnel/farm.go (new file, 93 lines)

@@ -0,0 +1,93 @@
package tunnel

import (
    "context"
    "errors"
    "fmt"
    "os"

    istorage "github.com/containers/image/v5/storage"
    "github.com/containers/podman/v4/pkg/bindings/system"
    "github.com/containers/podman/v4/pkg/domain/entities"
)

const (
    remoteFarmImageBuilderDriver = "podman-remote"
)

// FarmNodeName returns the remote engine's name.
func (ir *ImageEngine) FarmNodeName(ctx context.Context) string {
    return ir.NodeName
}

// FarmNodeDriver returns a description of the image builder driver
func (ir *ImageEngine) FarmNodeDriver(ctx context.Context) string {
    return remoteFarmImageBuilderDriver
}

func (ir *ImageEngine) fetchInfo(_ context.Context) (os, arch, variant string, nativePlatforms []string, err error) {
    engineInfo, err := system.Info(ir.ClientCtx, &system.InfoOptions{})
    if err != nil {
        return "", "", "", nil, fmt.Errorf("retrieving host info from %q: %w", ir.NodeName, err)
    }
    nativePlatform := engineInfo.Host.OS + "/" + engineInfo.Host.Arch
    if engineInfo.Host.Variant != "" {
        nativePlatform = nativePlatform + "/" + engineInfo.Host.Variant
    }
    return engineInfo.Host.OS, engineInfo.Host.Arch, engineInfo.Host.Variant, []string{nativePlatform}, nil
}

// FarmNodeInspect returns information about the remote engines in the farm
func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspectReport, error) {
    ir.platforms.Do(func() {
        ir.os, ir.arch, ir.variant, ir.nativePlatforms, ir.platformsErr = ir.fetchInfo(ctx)
    })
    return &entities.FarmInspectReport{NativePlatforms: ir.nativePlatforms,
        OS:      ir.os,
        Arch:    ir.arch,
        Variant: ir.variant}, ir.platformsErr
}

// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
    saveOptions := entities.ImageSaveOptions{
        Format: options.SaveFormat,
        Output: options.SaveFile,
    }
    if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
        return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
    }
    return options.SaveFormat + ":" + options.SaveFile, nil
}

// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
    tempFile, err := os.CreateTemp("", "")
    if err != nil {
        return "", err
    }
    defer os.Remove(tempFile.Name())
    defer tempFile.Close()
    saveOptions := entities.ImageSaveOptions{
        Format: options.SaveFormat,
        Output: tempFile.Name(),
    }
    if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
        return "", fmt.Errorf("saving image %q to temporary file: %w", options.ImageID, err)
    }
    loadOptions := entities.ImageLoadOptions{
        Input: tempFile.Name(),
    }
    if options.Destination == nil {
        return "", errors.New("internal error: options.Destination not set")
    } else {
        if _, err = options.Destination.Load(ctx, loadOptions); err != nil {
            return "", fmt.Errorf("loading image %q: %w", options.ImageID, err)
        }
    }
    name := fmt.Sprintf("%s:%s", istorage.Transport.Name(), options.ImageID)
    return name, err
}
@@ -9,11 +9,13 @@ import (
     "strings"
     "time"
 
+    bdefine "github.com/containers/buildah/define"
     "github.com/containers/common/libimage/filter"
     "github.com/containers/common/pkg/config"
     "github.com/containers/common/pkg/ssh"
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/types"
+    "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/pkg/bindings/images"
     "github.com/containers/podman/v4/pkg/domain/entities"
     "github.com/containers/podman/v4/pkg/domain/entities/reports"

@@ -377,6 +379,10 @@ func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts en
     if err != nil {
         return nil, err
     }
+    report.SaveFormat = define.OCIArchive
+    if opts.OutputFormat == bdefine.Dockerv2ImageManifest {
+        report.SaveFormat = define.V2s2Archive
+    }
     return report, nil
 }
 
@@ -157,3 +157,19 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
 
     return digest, err
 }
+
+// ManifestListClear clears out all instances from a manifest list
+func (ir *ImageEngine) ManifestListClear(ctx context.Context, name string) (string, error) {
+    listContents, err := manifests.InspectListData(ctx, name, &manifests.InspectOptions{})
+    if err != nil {
+        return "", err
+    }
+
+    for _, instance := range listContents.Manifests {
+        if _, err := manifests.Remove(ctx, name, instance.Digest.String(), &manifests.RemoveOptions{}); err != nil {
+            return "", err
+        }
+    }
+
+    return name, nil
+}
@@ -3,6 +3,7 @@ package tunnel
 import (
     "context"
     "os"
+    "sync"
     "syscall"
 
     "github.com/containers/podman/v4/libpod/define"

@@ -13,6 +14,7 @@ import (
 // Image-related runtime using an ssh-tunnel to utilize Podman service
 type ImageEngine struct {
     ClientCtx context.Context
+    FarmNode
 }
 
 // Container-related runtime using an ssh-tunnel to utilize Podman service

@@ -25,6 +27,16 @@ type SystemEngine struct {
     ClientCtx context.Context
 }
 
+type FarmNode struct {
+    NodeName        string
+    platforms       sync.Once
+    platformsErr    error
+    os              string
+    arch            string
+    variant         string
+    nativePlatforms []string
+}
+
 func remoteProxySignals(ctrID string, killFunc func(string) error) {
     sigBuffer := make(chan os.Signal, signal.SignalBufferSize)
     signal.CatchAll(sigBuffer)
pkg/emulation/binfmtmisc_linux.go (new file, 169 lines)

@@ -0,0 +1,169 @@
//go:build !remote
// +build !remote

package emulation

import (
    "bufio"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "os"
    "path/filepath"
    "sort"
    "strconv"
    "strings"
)

// registeredBinfmtMisc walks /proc/sys/fs/binfmt_misc and iterates through a
// list of known ELF header values to see if there's an emulator registered for
// them. Returns the list of emulated targets (which may be empty), or an
// error if something unexpected happened.
func registeredBinfmtMisc() ([]string, error) {
    var registered []string
    globalEnabled := false
    err := filepath.WalkDir("/proc/sys/fs/binfmt_misc", func(path string, d fs.DirEntry, err error) error {
        if filepath.Base(path) == "register" { // skip this one
            return nil
        }
        if err != nil && !errors.Is(err, os.ErrNotExist) {
            return err
        }
        info, err := d.Info()
        if err != nil {
            return err
        }
        if !info.Mode().IsRegular() {
            return nil // skip the directory itself
        }
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        if filepath.Base(path) == "status" {
            b, err := io.ReadAll(f)
            if err != nil {
                return err
            }
            status := strings.TrimSpace(string(b))
            switch status {
            case "disabled":
                globalEnabled = false
            case "enabled":
                globalEnabled = true
            default:
                return fmt.Errorf("unrecognized binfmt_misc status value %q in %q", status, path)
            }
            return nil
        }
        offset, magic, mask, err := parseBinfmtMisc(path, f)
        if err != nil {
            return err
        }
        if offset < 0 {
            return nil
        }
        for platform, headers := range getKnownELFPlatformHeaders() {
            for _, header := range headers {
                if magicMatch(header, offset, mask, magic) {
                    registered = append(registered, platform)
                    break
                }
            }
        }
        return nil
    })
    if !globalEnabled {
        return nil, nil
    }
    sort.Strings(registered)
    return registered, err
}

// magicMatch compares header, starting at the specified offset, masked with
// mask, against the magic value
func magicMatch(header []byte, offset int, mask, magic []byte) bool {
    mismatch := 0
    for i := offset; i < offset+len(magic); i++ {
        if i >= len(header) {
            break
        }
        m := magic[i-offset]
        if len(mask) > i-offset {
            m &= mask[i-offset]
        }
        if header[i] != m {
            // mismatch
            break
        }
        mismatch = i + 1
    }
    return mismatch >= offset+len(magic)
}

// parseBinfmtMisc parses a binfmt_misc registry entry. It returns the offset,
// magic, and mask values, or an error if there was an error parsing the data.
// If the returned offset is negative, the entry was disabled or should be
// non-fatally ignored for some other reason.
func parseBinfmtMisc(path string, r io.Reader) (int, []byte, []byte, error) {
    offset := 0
    magicString, maskString := "", ""
    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        text := scanner.Text()
        if strings.TrimSpace(text) == "" {
            continue
        }
        fields := strings.Fields(text)
        switch fields[0] {
        case "disabled":
            return -1, nil, nil, nil // we should ignore this specific one
        case "enabled": // keep scanning this entry
        case "interpreter": // good, but not something we need to record
        case "offset":
            if len(fields) != 2 {
                return -1, nil, nil, fmt.Errorf("invalid format for %q in %q", text, path)
            }
            offset64, err := strconv.ParseInt(fields[1], 10, 8)
            if err != nil {
                return -1, nil, nil, fmt.Errorf("invalid offset %q in %q", fields[1], path)
            }
            offset = int(offset64)
        case "magic":
            if len(fields) != 2 {
                return -1, nil, nil, fmt.Errorf("invalid format for %q in %q", text, path)
            }
            magicString = fields[1]
        case "mask":
            if len(fields) != 2 {
                return -1, nil, nil, fmt.Errorf("invalid format for %q in %q", text, path)
            }
            maskString = fields[1]
        case "flags", "flags:":
            if len(fields) != 2 {
                return -1, nil, nil, fmt.Errorf("invalid format for %q in %q", text, path)
            }
            if !strings.Contains(fields[1], "F") { // won't work in other mount namespaces, so ignore it
                return -1, nil, nil, nil
            }
        default:
            return -1, nil, nil, fmt.Errorf("unrecognized field %q in %q", fields[0], path)
        }
        continue
    }
    if magicString == "" || maskString == "" { // entry is missing some info we need here
        return -1, nil, nil, nil
    }
    magic, err := hex.DecodeString(magicString)
    if err != nil {
        return -1, nil, nil, fmt.Errorf("invalid magic %q in %q", magicString, path)
    }
    mask, err := hex.DecodeString(maskString)
    if err != nil {
        return -1, nil, nil, fmt.Errorf("invalid mask %q in %q", maskString, path)
    }
    return offset, magic, mask, nil
}
pkg/emulation/binfmtmisc_linux_test.go (new file, 106 lines)

@@ -0,0 +1,106 @@
//go:build !remote
// +build !remote

package emulation

import (
    "fmt"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// parseBinfmtMisc parses a binfmt_misc registry entry. It returns the offset,
// magic, and mask values, or an error if there was an error parsing the data.
// If the returned offset is negative, the entry was disabled or should be
// non-fatally ignored for some other reason.
func TestParseBinfmtMisc(t *testing.T) {
    vectors := []struct {
        platform, contents string
    }{
        {
            "linux/386",
            `
enabled
interpreter /usr/bin/qemu-i386-static
flags: F
offset 0
magic 7f454c4601010100000000000000000002000300
mask fffffffffffefe00fffffffffffffffffeffffff
`,
        },
        {
            "linux/amd64",
            `
enabled
interpreter /usr/bin/qemu-x86_64-static
flags: F
offset 0
magic 7f454c4602010100000000000000000002003e00
mask fffffffffffefe00fffffffffffffffffeffffff
`,
        },
        {
            "linux/arm",
            `
enabled
interpreter /usr/bin/qemu-arm-static
flags: F
offset 0
magic 7f454c4601010100000000000000000002002800
mask ffffffffffffff00fffffffffffffffffeffffff
`,
        },
        {
            "linux/arm64",
            `
enabled
interpreter /usr/bin/qemu-aarch64-static
flags: F
offset 0
magic 7f454c460201010000000000000000000200b700
mask ffffffffffffff00fffffffffffffffffeffffff
`,
        },
        {
            "linux/ppc64le",
            `
enabled
interpreter /usr/bin/qemu-ppc64le-static
flags: F
offset 0
magic 7f454c4602010100000000000000000002001500
mask ffffffffffffff00fffffffffffffffffeffff00
`,
        },
        {
            "linux/s390x",
            `
enabled
interpreter /usr/bin/qemu-s390x-static
flags: F
offset 0
magic 7f454c4602020100000000000000000000020016
mask ffffffffffffff00fffffffffffffffffffeffff
`,
        },
    }
    for i := range vectors {
        v := vectors[i]
        t.Run(v.platform, func(t *testing.T) {
            offset, magic, mask, err := parseBinfmtMisc(fmt.Sprintf("test vector %d", i), strings.NewReader(v.contents))
            require.NoError(t, err, "parseBinfmtMisc: %v", err)
            require.GreaterOrEqual(t, offset, 0, "%q shouldn't have been disabled", v.platform)
            headers := getKnownELFPlatformHeaders()[v.platform]
            matched := false
            for _, header := range headers {
                if magicMatch(header, offset, mask, magic) {
                    matched = true
                }
            }
            assert.True(t, matched, "%q did not match an expected header match", v.platform)
        })
    }
}
pkg/emulation/binfmtmisc_other.go (new file, 8 lines)

@@ -0,0 +1,8 @@
//go:build !linux && !remote
// +build !linux,!remote

package emulation

func registeredBinfmtMisc() ([]string, error) {
    return nil, nil
}

221	pkg/emulation/elf.go	Normal file
@@ -0,0 +1,221 @@
//go:build !remote
// +build !remote

package emulation

import (
    "debug/elf"
    "encoding/binary"
    "fmt"
    "sync"

    "github.com/sirupsen/logrus"
)

type elfPlatform struct {
    platform string
    osabi    []elf.OSABI
    class    elf.Class
    data     elf.Data
    alsoNone bool // also try with data=none,version=0
    machine  elf.Machine
    flags    []uint32
}

var (
    // knownELFPlatformHeaders is a mapping from target platform names to
    // plausible headers for the binaries built for those platforms. Call
    // getKnownELFPlatformHeaders() instead of reading this map directly.
    knownELFPlatformHeaders     = make(map[string][][]byte)
    knownELFPlatformHeadersOnce sync.Once
    // knownELFPlatforms is a table of target platforms that we built a
    // trivial program for, and the other fields are filled in based on
    // what we got when we ran eu-readelf -h against the results.
    knownELFPlatforms = []elfPlatform{
        {
            platform: "linux/386",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2LSB,
            alsoNone: true,
            machine:  elf.EM_386,
        },
        {
            platform: "linux/amd64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            alsoNone: true,
            machine:  elf.EM_X86_64,
        },
        {
            platform: "linux/arm",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_ARM,
        },
        {
            platform: "linux/arm64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_AARCH64,
        },
        {
            platform: "linux/arm64be",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_AARCH64,
        },
        {
            platform: "linux/loong64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_LOONGARCH,
        },
        {
            platform: "linux/mips",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_MIPS,
            flags:    []uint32{0, 2}, // elf.EF_MIPS_PIC set, or not
        },
        {
            platform: "linux/mipsle",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_MIPS_RS3_LE,
            flags:    []uint32{0, 2}, // elf.EF_MIPS_PIC set, or not
        },
        {
            platform: "linux/mips64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_MIPS,
            flags:    []uint32{0, 2}, // elf.EF_MIPS_PIC set, or not
        },
        {
            platform: "linux/mips64le",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_MIPS_RS3_LE,
            flags:    []uint32{0, 2}, // elf.EF_MIPS_PIC set, or not
        },
        {
            platform: "linux/ppc",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_PPC,
        },
        {
            platform: "linux/ppc64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_PPC64,
        },
        {
            platform: "linux/ppc64le",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_PPC64,
        },
        {
            platform: "linux/riscv32",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS32,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_RISCV,
        },
        {
            platform: "linux/riscv64",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2LSB,
            machine:  elf.EM_RISCV,
        },
        {
            platform: "linux/s390x",
            osabi:    []elf.OSABI{elf.ELFOSABI_NONE, elf.ELFOSABI_LINUX},
            class:    elf.ELFCLASS64,
            data:     elf.ELFDATA2MSB,
            machine:  elf.EM_S390,
        },
    }
)

// header generates an approximation of what the initial N bytes of a binary
// built for a given target look like
func (e *elfPlatform) header() ([][]byte, error) {
    var headers [][]byte
    osabi := e.osabi
    if len(osabi) == 0 {
        osabi = []elf.OSABI{elf.ELFOSABI_NONE}
    }
    for i := range osabi {
        flags := e.flags
        if len(flags) == 0 {
            flags = []uint32{0}
        }
        for f := range flags {
            var endian binary.ByteOrder
            var entrySize, phoffSize, shoffSize int
            header := make([]byte, 40)
            copy(header, elf.ELFMAG)
            switch e.class {
            case elf.ELFCLASS32:
                entrySize, phoffSize, shoffSize = 2, 2, 2
            case elf.ELFCLASS64:
                entrySize, phoffSize, shoffSize = 4, 4, 4
            }
            switch e.data {
            case elf.ELFDATA2LSB:
                endian = binary.LittleEndian
            case elf.ELFDATA2MSB:
                endian = binary.BigEndian
            default:
                return nil, fmt.Errorf("internal error in entry for %q", e.platform)
            }
            header[elf.EI_OSABI] = byte(osabi[i])
            header[elf.EI_CLASS] = byte(e.class)
            header[elf.EI_DATA] = byte(e.data)
            header[elf.EI_VERSION] = byte(elf.EV_CURRENT)
            header[elf.EI_ABIVERSION] = 0
            endian.PutUint16(header[16:], uint16(elf.ET_EXEC))
            endian.PutUint16(header[18:], uint16(e.machine))
            endian.PutUint32(header[20:], uint32(elf.EV_CURRENT))
            endian.PutUint32(header[24+entrySize+phoffSize+shoffSize:], flags[f])
            headers = append(headers, append([]byte{}, header...))
            if e.alsoNone {
                header[elf.EI_DATA] = byte(elf.ELFDATANONE)
                header[elf.EI_VERSION] = byte(elf.EV_NONE)
                endian.PutUint32(header[20:], uint32(elf.EV_NONE))
                headers = append(headers, append([]byte{}, header...))
            }
        }
    }
    return headers, nil
}

func getKnownELFPlatformHeaders() map[string][][]byte {
    knownELFPlatformHeadersOnce.Do(func() {
        for _, p := range knownELFPlatforms {
            headerList, err := p.header()
            if err != nil {
                logrus.Errorf("generating headers for %q: %v\n", p.platform, err)
                continue
            }
            knownELFPlatformHeaders[p.platform] = headerList
        }
    })
    return knownELFPlatformHeaders
}
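
The headers above are only approximations of real binaries; a consumer decides whether a binfmt_misc registration would claim one of them by comparing the bytes at the registration's offset, under its mask, against its magic, which is what the magicMatch call in the test earlier in this change does. A minimal sketch of such a comparison, written here as an independent illustration rather than a copy of the repository's helper:

// matchesRegistration reports whether a candidate header would be claimed by a
// binfmt_misc registration described by its offset, magic, and mask fields.
// Illustrative only; the real magicMatch helper in pkg/emulation may differ.
func matchesRegistration(header []byte, offset int, magic, mask []byte) bool {
    if offset < 0 || offset+len(magic) > len(header) {
        return false
    }
    for i := range magic {
        m := byte(0xff) // if the registration omits a mask, every bit is compared
        if i < len(mask) {
            m = mask[i]
        }
        if header[offset+i]&m != magic[i]&m {
            return false
        }
    }
    return true
}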

19	pkg/emulation/emulation.go	Normal file
@@ -0,0 +1,19 @@
//go:build !remote
// +build !remote

package emulation

import "github.com/sirupsen/logrus"

// Registered returns a list of platforms for which we think we have user
// space emulation available.
func Registered() []string {
    var registered []string
    binfmt, err := registeredBinfmtMisc()
    if err != nil {
        logrus.Warnf("registeredBinfmtMisc(): %v", err)
        return nil
    }
    registered = append(registered, binfmt...)
    return registered
}
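
A short usage sketch, written as if it lived in this package: the farm node inspection code is the expected consumer of Registered, but any caller can log what the current node is able to emulate. The helper name below is illustrative and not part of this change; it relies only on the logrus import already used here.

// logRegisteredPlatforms is an illustrative helper: report every platform for
// which user-space emulation appears to be registered on this node.
func logRegisteredPlatforms() {
    for _, platform := range Registered() {
        logrus.Infof("user-space emulation available for %s", platform)
    }
}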

492	pkg/farm/farm.go	Normal file
@@ -0,0 +1,492 @@
package farm

import (
    "bufio"
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "sort"
    "strings"
    "sync"

    "github.com/containers/buildah/define"
    lplatform "github.com/containers/common/libimage/platform"
    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/domain/infra"
    "github.com/hashicorp/go-multierror"
    "github.com/sirupsen/logrus"
)

// Farm represents a group of connections to builders.
type Farm struct {
    name        string
    localEngine entities.ImageEngine            // not nil -> use local engine, too
    builders    map[string]entities.ImageEngine // name -> builder
}

// Schedule is a description of where and how we'll do builds.
type Schedule struct {
    platformBuilders map[string]string // target->connection
}

func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine) (*Farm, error) {
    farm := &Farm{
        builders:    make(map[string]entities.ImageEngine),
        localEngine: localEngine,
        name:        name,
    }
    var (
        builderMutex sync.Mutex
        builderGroup multierror.Group
    )
    // Set up the remote connections to handle the builds
    for name, dest := range *destinations {
        name, dest := name, dest
        builderGroup.Go(func() error {
            fmt.Printf("Connecting to %q\n", name)
            engine, err := infra.NewImageEngine(&entities.PodmanConfig{
                EngineMode:   entities.TunnelMode,
                URI:          dest.URI,
                Identity:     dest.Identity,
                MachineMode:  dest.IsMachine,
                FarmNodeName: name,
            })
            if err != nil {
                return fmt.Errorf("initializing image engine at %q: %w", dest.URI, err)
            }

            defer fmt.Printf("Builder %q ready\n", name)
            builderMutex.Lock()
            defer builderMutex.Unlock()
            farm.builders[name] = engine
            return nil
        })
    }
    // If local=true then use the local machine for builds as well
    if localEngine != nil {
        builderGroup.Go(func() error {
            fmt.Println("Setting up local builder")
            defer fmt.Println("Local builder ready")
            builderMutex.Lock()
            defer builderMutex.Unlock()
            farm.builders[entities.LocalFarmImageBuilderName] = localEngine
            return nil
        })
    }
    if builderError := builderGroup.Wait(); builderError != nil {
        if err := builderError.ErrorOrNil(); err != nil {
            return nil, err
        }
    }
    if len(farm.builders) > 0 {
        defer fmt.Printf("Farm %q ready\n", farm.name)
        return farm, nil
    }
    return nil, errors.New("no builders configured")
}

func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine) (*Farm, error) {
    // Get the destinations of the connections specified in the farm
    destinations, err := getFarmDestinations(name)
    if err != nil {
        return nil, err
    }

    return newFarmWithBuilders(ctx, name, &destinations, localEngine)
}

// Done performs any necessary end-of-process cleanup for the farm's members.
func (f *Farm) Done(ctx context.Context) error {
    return f.forEach(ctx, func(ctx context.Context, name string, engine entities.ImageEngine) (bool, error) {
        engine.Shutdown(ctx)
        return false, nil
    })
}

// Status polls the connections in the farm and returns a map of their
// individual status, along with an error if any are down or otherwise unreachable.
func (f *Farm) Status(ctx context.Context) (map[string]error, error) {
    status := make(map[string]error)
    var (
        statusMutex sync.Mutex
        statusGroup multierror.Group
    )
    for _, engine := range f.builders {
        engine := engine
        statusGroup.Go(func() error {
            logrus.Debugf("getting status of %q", engine.FarmNodeName(ctx))
            defer logrus.Debugf("got status of %q", engine.FarmNodeName(ctx))
            _, err := engine.Config(ctx)
            statusMutex.Lock()
            defer statusMutex.Unlock()
            status[engine.FarmNodeName(ctx)] = err
            return err
        })
    }
    statusError := statusGroup.Wait()

    return status, statusError.ErrorOrNil()
}
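
// Illustrative sketch, not part of the files in this change: Status returns
// both a per-node map and a combined error, so a caller can report which
// connection failed instead of stopping at the first problem. Written as if
// it sat alongside this file; the helper name is an assumption.
func reportFarmStatus(ctx context.Context, f *Farm) error {
    status, err := f.Status(ctx)
    for node, nodeErr := range status {
        if nodeErr != nil {
            fmt.Printf("builder %q is unreachable: %v\n", node, nodeErr)
        } else {
            fmt.Printf("builder %q is ready\n", node)
        }
    }
    if err != nil {
        return fmt.Errorf("one or more farm nodes failed their status check: %w", err)
    }
    return nil
}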

// forEach runs the called function once for every node in the farm and
// collects their results, continuing until it finishes visiting every node or
// a function call returns true as its first return value.
func (f *Farm) forEach(ctx context.Context, fn func(context.Context, string, entities.ImageEngine) (bool, error)) error {
    var merr *multierror.Error
    for name, engine := range f.builders {
        stop, err := fn(ctx, name, engine)
        if err != nil {
            merr = multierror.Append(merr, fmt.Errorf("%s: %w", engine.FarmNodeName(ctx), err))
        }
        if stop {
            break
        }
    }

    return merr.ErrorOrNil()
}

// NativePlatforms returns a list of the set of platforms for which the farm
// can build images natively.
func (f *Farm) NativePlatforms(ctx context.Context) ([]string, error) {
    nativeMap := make(map[string]struct{})
    platforms := []string{}
    var (
        nativeMutex sync.Mutex
        nativeGroup multierror.Group
    )
    for _, engine := range f.builders {
        engine := engine
        nativeGroup.Go(func() error {
            logrus.Debugf("getting native platform of %q\n", engine.FarmNodeName(ctx))
            defer logrus.Debugf("got native platform of %q", engine.FarmNodeName(ctx))
            inspect, err := engine.FarmNodeInspect(ctx)
            if err != nil {
                return err
            }
            nativeMutex.Lock()
            defer nativeMutex.Unlock()
            for _, platform := range inspect.NativePlatforms {
                nativeMap[platform] = struct{}{}
            }
            return nil
        })
    }
    merr := nativeGroup.Wait()
    if merr != nil {
        if err := merr.ErrorOrNil(); err != nil {
            return nil, err
        }
    }

    for platform := range nativeMap {
        platforms = append(platforms, platform)
    }
    sort.Strings(platforms)
    return platforms, nil
}

// EmulatedPlatforms returns a list of the set of platforms for which the farm
// can build images with the help of emulation.
func (f *Farm) EmulatedPlatforms(ctx context.Context) ([]string, error) {
    emulatedMap := make(map[string]struct{})
    platforms := []string{}
    var (
        emulatedMutex sync.Mutex
        emulatedGroup multierror.Group
    )
    for _, engine := range f.builders {
        engine := engine
        emulatedGroup.Go(func() error {
            logrus.Debugf("getting emulated platforms of %q", engine.FarmNodeName(ctx))
            defer logrus.Debugf("got emulated platforms of %q", engine.FarmNodeName(ctx))
            inspect, err := engine.FarmNodeInspect(ctx)
            if err != nil {
                return err
            }
            emulatedMutex.Lock()
            defer emulatedMutex.Unlock()
            for _, platform := range inspect.EmulatedPlatforms {
                emulatedMap[platform] = struct{}{}
            }
            return nil
        })
    }
    merr := emulatedGroup.Wait()
    if merr != nil {
        if err := merr.ErrorOrNil(); err != nil {
            return nil, err
        }
    }

    for platform := range emulatedMap {
        platforms = append(platforms, platform)
    }
    sort.Strings(platforms)
    return platforms, nil
}

// Schedule takes a list of platforms and returns a list of connections which
// can be used to build for those platforms. It always prefers native builders
// over emulated builders, but will assign a builder which can use emulation
// for a platform if no suitable native builder is available.
//
// If platforms is an empty list, all available native platforms will be
// scheduled.
//
// TODO: add (Priority,Weight *int) a la RFC 2782 to destinations that we know
// of, and factor those in when assigning builds to nodes in here.
func (f *Farm) Schedule(ctx context.Context, platforms []string) (Schedule, error) {
    var (
        err       error
        infoGroup multierror.Group
        infoMutex sync.Mutex
    )
    // If we weren't given a list of target platforms, generate one.
    if len(platforms) == 0 {
        platforms, err = f.NativePlatforms(ctx)
        if err != nil {
            return Schedule{}, fmt.Errorf("reading list of available native platforms: %w", err)
        }
    }

    platformBuilders := make(map[string]string)
    native := make(map[string]string)
    emulated := make(map[string]string)
    var localPlatform string
    // Make notes of which platforms we can build for natively, and which
    // ones we can build for using emulation.
    for name, engine := range f.builders {
        name, engine := name, engine
        infoGroup.Go(func() error {
            inspect, err := engine.FarmNodeInspect(ctx)
            if err != nil {
                return err
            }
            infoMutex.Lock()
            defer infoMutex.Unlock()
            for _, n := range inspect.NativePlatforms {
                if _, assigned := native[n]; !assigned {
                    native[n] = name
                }
                if name == entities.LocalFarmImageBuilderName {
                    localPlatform = n
                }
            }
            for _, e := range inspect.EmulatedPlatforms {
                if _, assigned := emulated[e]; !assigned {
                    emulated[e] = name
                }
            }
            return nil
        })
    }
    merr := infoGroup.Wait()
    if merr != nil {
        if err := merr.ErrorOrNil(); err != nil {
            return Schedule{}, err
        }
    }
    // Assign a build to the first node that could build it natively, and
    // if there isn't one, the first one that can build it with the help of
    // emulation, and if there aren't any, error out.
    for _, platform := range platforms {
        if builder, ok := native[platform]; ok {
            platformBuilders[platform] = builder
        } else if builder, ok := emulated[platform]; ok {
            platformBuilders[platform] = builder
        } else {
            return Schedule{}, fmt.Errorf("no builder capable of building for platform %q available", platform)
        }
    }
    // If local is set, prioritize building on local
    if localPlatform != "" {
        platformBuilders[localPlatform] = entities.LocalFarmImageBuilderName
    }
    schedule := Schedule{
        platformBuilders: platformBuilders,
    }
    return schedule, nil
}
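
// Illustrative sketch, not part of the files in this change: to make the
// native-first preference concrete, this is a self-contained rendering of just
// the assignment step above. With native = {"linux/amd64": "node1"} and
// emulated = {"linux/arm64": "node1"}, calling
// assignBuilders([]string{"linux/amd64", "linux/arm64"}, native, emulated)
// assigns both platforms to "node1": amd64 natively, arm64 via emulation.
func assignBuilders(platforms []string, native, emulated map[string]string) (map[string]string, error) {
    out := make(map[string]string)
    for _, p := range platforms {
        if b, ok := native[p]; ok {
            out[p] = b // a native builder always wins
        } else if b, ok := emulated[p]; ok {
            out[p] = b // otherwise fall back to a builder that can emulate p
        } else {
            return nil, fmt.Errorf("no builder capable of building for platform %q available", p)
        }
    }
    return out, nil
}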

// Build runs a build using the specified targetplatform:service map. If all
// builds succeed, it copies the resulting images from the remote hosts to the
// local service and builds a manifest list with the specified reference name.
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string) error {
    switch options.OutputFormat {
    default:
        return fmt.Errorf("unknown output format %q requested", options.OutputFormat)
    case "", define.OCIv1ImageManifest:
        options.OutputFormat = define.OCIv1ImageManifest
    case define.Dockerv2ImageManifest:
    }

    // Build the list of jobs.
    var jobs sync.Map
    type job struct {
        platform string
        os       string
        arch     string
        variant  string
        builder  entities.ImageEngine
    }
    for platform, builderName := range schedule.platformBuilders { // prepare to build
        builder, ok := f.builders[builderName]
        if !ok {
            return fmt.Errorf("unknown builder %q", builderName)
        }
        var rawOS, rawArch, rawVariant string
        p := strings.Split(platform, "/")
        if len(p) > 0 && p[0] != "" {
            rawOS = p[0]
        }
        if len(p) > 1 {
            rawArch = p[1]
        }
        if len(p) > 2 {
            rawVariant = p[2]
        }
        os, arch, variant := lplatform.Normalize(rawOS, rawArch, rawVariant)
        jobs.Store(builderName, job{
            platform: platform,
            os:       os,
            arch:     arch,
            variant:  variant,
            builder:  builder,
        })
    }

    // Decide where the final result will be stored.
    var (
        manifestListBuilder listBuilder
        err                 error
    )
    listBuilderOptions := listBuilderOptions{
        cleanup: options.Cleanup,
        iidFile: options.IIDFile,
    }
    if strings.HasPrefix(reference, "dir:") || f.localEngine == nil {
        location := strings.TrimPrefix(reference, "dir:")
        manifestListBuilder, err = newFileManifestListBuilder(location, listBuilderOptions)
        if err != nil {
            return fmt.Errorf("preparing to build list: %w", err)
        }
    } else {
        manifestListBuilder = newLocalManifestListBuilder(reference, f.localEngine, listBuilderOptions)
    }

    // Start builds in parallel and wait for them all to finish.
    var (
        buildResults sync.Map
        buildGroup   multierror.Group
    )
    type buildResult struct {
        report  entities.BuildReport
        builder entities.ImageEngine
    }
    for platform, builder := range schedule.platformBuilders {
        platform, builder := platform, builder
        outReader, outWriter := io.Pipe()
        errReader, errWriter := io.Pipe()
        go func() {
            defer outReader.Close()
            reader := bufio.NewReader(outReader)
            writer := options.Out
            if writer == nil {
                writer = os.Stdout
            }
            line, err := reader.ReadString('\n')
            for err == nil {
                line = strings.TrimSuffix(line, "\n")
                fmt.Fprintf(writer, "[%s@%s] %s\n", platform, builder, line)
                line, err = reader.ReadString('\n')
            }
        }()
        go func() {
            defer errReader.Close()
            reader := bufio.NewReader(errReader)
            writer := options.Err
            if writer == nil {
                writer = os.Stderr
            }
            line, err := reader.ReadString('\n')
            for err == nil {
                line = strings.TrimSuffix(line, "\n")
                fmt.Fprintf(writer, "[%s@%s] %s\n", platform, builder, line)
                line, err = reader.ReadString('\n')
            }
        }()
        buildGroup.Go(func() error {
            var j job
            defer outWriter.Close()
            defer errWriter.Close()
            c, ok := jobs.Load(builder)
            if !ok {
                return fmt.Errorf("unknown connection for %q (shouldn't happen)", builder)
            }
            if j, ok = c.(job); !ok {
                return fmt.Errorf("unexpected connection type for %q (shouldn't happen)", builder)
            }
            buildOptions := options
            buildOptions.Platforms = []struct{ OS, Arch, Variant string }{{j.os, j.arch, j.variant}}
            buildOptions.Out = outWriter
            buildOptions.Err = errWriter
            fmt.Printf("Starting build for %v at %q\n", buildOptions.Platforms, builder)
            buildReport, err := j.builder.Build(ctx, options.ContainerFiles, buildOptions)
            if err != nil {
                return fmt.Errorf("building for %q on %q: %w", j.platform, builder, err)
            }
            fmt.Printf("finished build for %v at %q: built %s\n", buildOptions.Platforms, builder, buildReport.ID)
            buildResults.Store(platform, buildResult{
                report:  *buildReport,
                builder: j.builder,
            })
            return nil
        })
    }
    buildErrors := buildGroup.Wait()
    if err := buildErrors.ErrorOrNil(); err != nil {
        return fmt.Errorf("building: %w", err)
    }

    // Assemble the final result.
    perArchBuilds := make(map[entities.BuildReport]entities.ImageEngine)
    buildResults.Range(func(k, v any) bool {
        result, ok := v.(buildResult)
        if !ok {
            fmt.Fprintf(os.Stderr, "report %v not a build result?", v)
            return false
        }
        perArchBuilds[result.report] = result.builder
        return true
    })
    location, err := manifestListBuilder.build(ctx, perArchBuilds)
    if err != nil {
        return err
    }
    fmt.Printf("Saved list to %q\n", location)
    return nil
}

func getFarmDestinations(name string) (map[string]config.Destination, error) {
    dest := make(map[string]config.Destination)
    cfg, err := config.ReadCustomConfig()
    if err != nil {
        return dest, err
    }

    // If no farm name is given, then grab all the service destinations available
    if name == "" {
        return cfg.Engine.ServiceDestinations, nil
    }

    // Go through the connections in the farm and get their destination
    for _, c := range cfg.Farms.List[name] {
        dest[c] = cfg.Engine.ServiceDestinations[c]
    }

    return dest, nil
}
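
Taken together, the exported surface above (NewFarm, Status, Schedule, Build, Done) is what the podman farm build command drives. The following is a minimal driver sketch written as if it sat alongside this file, assuming an already-initialized local image engine and a populated entities.BuildOptions; it is not the actual cmd/podman/farm/build.go wiring, and the farm name is a placeholder.

// runFarmBuild is an illustrative driver, not the real command-line code.
func runFarmBuild(ctx context.Context, localEngine entities.ImageEngine, opts entities.BuildOptions, reference string) error {
    // "my-farm" is a placeholder; an empty name would use every service destination.
    f, err := NewFarm(ctx, "my-farm", localEngine)
    if err != nil {
        return err
    }
    defer func() { _ = f.Done(ctx) }()

    // A nil platform list schedules every platform the farm can build natively.
    schedule, err := f.Schedule(ctx, nil)
    if err != nil {
        return err
    }

    // Build on every assigned node and assemble the results under reference,
    // either as a manifest list in local storage or, for a "dir:" reference, on disk.
    return f.Build(ctx, schedule, opts, reference)
}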

297	pkg/farm/list_builder.go	Normal file
@@ -0,0 +1,297 @@
package farm

import (
    "context"
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
    "sync"

    lmanifests "github.com/containers/common/libimage/manifests"
    "github.com/containers/common/pkg/supplemented"
    cp "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/hashicorp/go-multierror"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
)

type listBuilder interface {
    build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error)
}

type listBuilderOptions struct {
    cleanup bool
    iidFile string
}

type listLocal struct {
    listName    string
    localEngine entities.ImageEngine
    options     listBuilderOptions
}

// newLocalManifestListBuilder returns a manifest list builder which saves a
// manifest list and images to local storage.
func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) listBuilder {
    return &listLocal{
        listName:    listName,
        options:     options,
        localEngine: localEngine,
    }
}

// Build retrieves images from the build reports and assembles them into a
// manifest list in local container storage.
func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
    manifest := l.listName
    exists, err := l.localEngine.ManifestExists(ctx, l.listName)
    if err != nil {
        return "", err
    }
    // Create list if it doesn't exist
    if !exists.Value {
        manifest, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{})
        if err != nil {
            return "", fmt.Errorf("creating manifest list %q: %w", l.listName, err)
        }
    }

    // Pull the images into local storage
    var (
        pullGroup multierror.Group
        refsMutex sync.Mutex
    )
    refs := []string{}
    for image, engine := range images {
        image, engine := image, engine
        pullOptions := entities.PullToLocalOptions{
            ImageID:     image.ID,
            SaveFormat:  image.SaveFormat,
            Destination: l.localEngine,
        }
        pullGroup.Go(func() error {
            logrus.Infof("copying image %s", image.ID)
            defer logrus.Infof("copied image %s", image.ID)
            ref, err := engine.PullToLocal(ctx, pullOptions)
            if err != nil {
                return fmt.Errorf("pulling image %q to local storage: %w", image, err)
            }
            refsMutex.Lock()
            defer refsMutex.Unlock()
            refs = append(refs, ref)
            return nil
        })
    }
    pullErrors := pullGroup.Wait()
    err = pullErrors.ErrorOrNil()
    if err != nil {
        return "", fmt.Errorf("building: %w", err)
    }

    if l.options.cleanup {
        var rmGroup multierror.Group
        for image, engine := range images {
            if engine.FarmNodeName(ctx) == entities.LocalFarmImageBuilderName {
                continue
            }
            image, engine := image, engine
            rmGroup.Go(func() error {
                _, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
                if len(err) > 0 {
                    return err[0]
                }
                return nil
            })
        }
        rmErrors := rmGroup.Wait()
        if rmErrors != nil {
            if err = rmErrors.ErrorOrNil(); err != nil {
                return "", fmt.Errorf("removing intermediate images: %w", err)
            }
        }
    }

    // Clear the list in the event it already existed
    if exists.Value {
        _, err = l.localEngine.ManifestListClear(ctx, manifest)
        if err != nil {
            return "", fmt.Errorf("error clearing list %q", manifest)
        }
    }

    // Add the images to the list
    listID, err := l.localEngine.ManifestAdd(ctx, manifest, refs, entities.ManifestAddOptions{})
    if err != nil {
        return "", fmt.Errorf("adding images %q to list: %w", refs, err)
    }

    // Write the manifest list's ID file if we're expected to
    if l.options.iidFile != "" {
        if err := os.WriteFile(l.options.iidFile, []byte("sha256:"+listID), 0644); err != nil {
            return "", err
        }
    }

    return l.listName, nil
}
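
// Illustrative sketch, not part of the files in this change: the iidfile
// written above uses the same "sha256:<id>" convention as podman build
// --iidfile, so a follow-up step can read the manifest list ID back. This
// helper is an assumption for illustration and would additionally need the
// standard "strings" import, which this file does not pull in.
func readListIIDFile(path string) (string, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return "", err
    }
    return strings.TrimPrefix(strings.TrimSpace(string(data)), "sha256:"), nil
}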

type listFiles struct {
    directory string
    options   listBuilderOptions
}

// newFileManifestListBuilder returns a manifest list builder which saves a manifest
// list and images to a specified directory in the non-standard dir: format.
func newFileManifestListBuilder(directory string, options listBuilderOptions) (listBuilder, error) {
    if options.iidFile != "" {
        return nil, fmt.Errorf("saving to dir: format doesn't use image IDs, --iidfile not supported")
    }
    return &listFiles{directory: directory, options: options}, nil
}

// Build retrieves images from the build reports and assembles them into a
// manifest list in the configured directory.
func (m *listFiles) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
    listFormat := v1.MediaTypeImageIndex
    imageFormat := v1.MediaTypeImageManifest

    tempDir, err := os.MkdirTemp("", "")
    if err != nil {
        return "", err
    }
    defer os.RemoveAll(tempDir)

    name := fmt.Sprintf("dir:%s", tempDir)
    tempRef, err := alltransports.ParseImageName(name)
    if err != nil {
        return "", fmt.Errorf("parsing temporary image ref %q: %w", name, err)
    }
    if err := os.MkdirAll(m.directory, 0o755); err != nil {
        return "", err
    }
    output, err := alltransports.ParseImageName("dir:" + m.directory)
    if err != nil {
        return "", fmt.Errorf("parsing output directory ref %q: %w", "dir:"+m.directory, err)
    }

    // Pull the images into the temporary directory
    var (
        pullGroup  multierror.Group
        pullErrors *multierror.Error
        refsMutex  sync.Mutex
    )
    refs := make(map[entities.BuildReport]types.ImageReference)
    for image, engine := range images {
        image, engine := image, engine
        tempFile, err := os.CreateTemp(tempDir, "archive-*.tar")
        if err != nil {
            defer func() {
                pullErrors = pullGroup.Wait()
            }()
            perr := pullErrors.ErrorOrNil()
            if perr != nil {
                return "", perr
            }
            return "", err
        }
        defer tempFile.Close()

        pullGroup.Go(func() error {
            logrus.Infof("copying image %s", image.ID)
            defer logrus.Infof("copied image %s", image.ID)
            pullOptions := entities.PullToFileOptions{
                ImageID:    image.ID,
                SaveFormat: image.SaveFormat,
                SaveFile:   tempFile.Name(),
            }
            if image.SaveFormat == manifest.DockerV2Schema2MediaType {
                listFormat = manifest.DockerV2ListMediaType
                imageFormat = manifest.DockerV2Schema2MediaType
            }
            reference, err := engine.PullToFile(ctx, pullOptions)
            if err != nil {
                return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
            }
            ref, err := alltransports.ParseImageName(reference)
            if err != nil {
                return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
            }
            refsMutex.Lock()
            defer refsMutex.Unlock()
            refs[image] = ref
            return nil
        })
    }
    pullErrors = pullGroup.Wait()
    err = pullErrors.ErrorOrNil()
    if err != nil {
        return "", fmt.Errorf("building: %w", err)
    }

    if m.options.cleanup {
        var rmGroup multierror.Group
        for image, engine := range images {
            image, engine := image, engine
            rmGroup.Go(func() error {
                _, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
                if len(err) > 0 {
                    return err[0]
                }
                return nil
            })
        }
        rmErrors := rmGroup.Wait()
        if rmErrors != nil {
            if err = rmErrors.ErrorOrNil(); err != nil {
                return "", fmt.Errorf("removing intermediate images: %w", err)
            }
        }
    }

    supplemental := []types.ImageReference{}
    var sys types.SystemContext
    // Create a manifest list
    list := lmanifests.Create()
    // Add the images to the list
    for image, ref := range refs {
        if _, err = list.Add(ctx, &sys, ref, true); err != nil {
            return "", fmt.Errorf("adding image %q to list: %w", image.ID, err)
        }
        supplemental = append(supplemental, ref)
    }
    // Save the list to the temporary directory to be the main manifest
    listBytes, err := list.Serialize(listFormat)
    if err != nil {
        return "", fmt.Errorf("serializing manifest list: %w", err)
    }
    if err = os.WriteFile(filepath.Join(tempDir, "manifest.json"), listBytes, fs.FileMode(0o600)); err != nil {
        return "", fmt.Errorf("writing temporary manifest list: %w", err)
    }

    // Now copy everything to the final dir: location
    defaultPolicy, err := signature.DefaultPolicy(&sys)
    if err != nil {
        return "", err
    }
    policyContext, err := signature.NewPolicyContext(defaultPolicy)
    if err != nil {
        return "", err
    }
    input := supplemented.Reference(tempRef, supplemental, cp.CopyAllImages, nil)
    copyOptions := cp.Options{
        ForceManifestMIMEType: imageFormat,
        ImageListSelection:    cp.CopyAllImages,
    }
    _, err = cp.Image(ctx, policyContext, output, input, &copyOptions)
    if err != nil {
        return "", fmt.Errorf("copying images to dir:%q: %w", m.directory, err)
    }

    return "dir:" + m.directory, nil
}

77	test/farm/001-farm.bats	Normal file
@@ -0,0 +1,77 @@
#!/usr/bin/env bats
#
# Tests of podman farm commands
#

load helpers.bash

###############################################################################
# BEGIN tests

fname="test-farm"
containerfile="test/farm/Containerfile"

@test "farm - check farm has been created" {
    run_podman farm ls
    assert "$output" =~ $fname
    assert "$output" =~ "test-node"
}

@test "farm - build on local only" {
    iname="test-image-1"
    empty_farm="empty-farm"
    # create an empty farm
    run_podman farm create $empty_farm
    run_podman farm --farm $empty_farm build -f $containerfile -t $iname .
    assert "$output" =~ "Local builder ready"
    # get the system architecture
    run_podman info --format '{{.Host.Arch}}'
    ARCH=$output
    # inspect manifest list built and saved in local containers-storage
    run_podman manifest inspect $iname
    assert "$output" =~ $ARCH
}

@test "farm - build on farm node only with --cleanup" {
    iname="test-image-2"
    run_podman farm build -f $containerfile --cleanup --local=false -t $iname .
    assert "$output" =~ "Farm \"$fname\" ready"
    # get the system architecture
    run_podman info --format '{{.Host.Arch}}'
    ARCH=$output
    # inspect manifest list built and saved in dir
    manifest=$(cat $iname/manifest.json)
    assert "$manifest" =~ $ARCH
    # see if we can ssh into node to check the image was cleaned up
    nodeimg=$(ssh $ROOTLESS_USER@localhost podman images --filter dangling=true --noheading 2>&1)
    assert "$nodeimg" = ""
    # check that no image was built locally
    run_podman images --filter dangling=true --noheading
    assert "$output" = ""
}

@test "farm - build on farm node and local" {
    iname="test-image-3"
    run_podman farm build -f $containerfile -t $iname .
    assert "$output" =~ "Farm \"$fname\" ready"
    # get the system architecture
    run_podman info --format '{{.Host.Arch}}'
    ARCH=$output
    # inspect manifest list built and saved in dir
    run_podman manifest inspect $iname
    assert "$output" =~ $ARCH
}

# Test out podman-remote

@test "farm - build on farm node only (podman-remote)" {
    iname="test-image-4"
    run_podman --remote farm build -f $containerfile -t $iname .
    assert "$output" =~ "Farm \"$fname\" ready"
    # get the system architecture
    run_podman --remote info --format '{{.Host.Arch}}'
    ARCH=$output
    # inspect manifest list built and saved in dir
    manifest=$(cat $iname/manifest.json)
    assert "$manifest" =~ $ARCH
}

3	test/farm/Containerfile	Normal file
@@ -0,0 +1,3 @@
FROM alpine
RUN arch | tee /arch.txt
RUN date | tee /built.txt

11	test/farm/helpers.bash	Normal file
@@ -0,0 +1,11 @@
# -*- bash -*-

load ../system/helpers.bash

function setup(){
    basic_setup
}

function teardown(){
    basic_teardown
}

14	test/farm/setup_suite.bash	Normal file
@@ -0,0 +1,14 @@
# -*- bash -*-

load helpers.bash

function setup_suite(){
    # only set up the podman farm before the first test
    run_podman system connection add --identity /home/$ROOTLESS_USER/.ssh/id_rsa test-node $ROOTLESS_USER@localhost
    run_podman farm create test-farm test-node
}

function teardown(){
    # clear out the farms after the last farm test
    run podman farm rm --all
}