Add podman farm build command

Add a podman farm build command that dispatches builds to the
nodes defined in the farm, builds the image on each farm node,
and pulls the results back to the local machine to assemble
a manifest list.

Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
This commit is contained in:
Urvashi Mohnani
2023-09-19 09:56:25 -04:00
parent 2e88d580a9
commit dd8f57a3b4
28 changed files with 1308 additions and 24 deletions

View File

@ -46,7 +46,13 @@ type BuildFlagsWrapper struct {
Cleanup bool Cleanup bool
} }
func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper) { // FarmBuildHiddenFlags are the flags hidden from the farm build command because they are either not
// supported or don't make sense in the farm build use case
var FarmBuildHiddenFlags = []string{"arch", "all-platforms", "compress", "cw", "disable-content-trust",
"logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin", "tls-verify",
"variant"}
func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBuild bool) {
flags := cmd.Flags() flags := cmd.Flags()
// buildx build --load ignored, but added for compliance // buildx build --load ignored, but added for compliance
@ -116,6 +122,11 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper) {
_ = flags.MarkHidden("logsplit") _ = flags.MarkHidden("logsplit")
_ = flags.MarkHidden("cw") _ = flags.MarkHidden("cw")
} }
if isFarmBuild {
for _, f := range FarmBuildHiddenFlags {
_ = flags.MarkHidden(f)
}
}
} }
func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrapper) (*entities.BuildOptions, error) { func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrapper) (*entities.BuildOptions, error) {

135
cmd/podman/farm/build.go Normal file
View File

@ -0,0 +1,135 @@
package farm
import (
"errors"
"fmt"
"os"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/farm"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// buildOptions holds the farm-build-specific CLI values together with the
// build flags shared with `podman build`.
type buildOptions struct {
	// buildOptions carries the common build flags.
	buildOptions common.BuildFlagsWrapper
	// local, when true, builds on the local machine in addition to the
	// farm nodes (default true; hidden for the remote client).
	local bool
	// platforms restricts builds to farm nodes matching these platforms.
	platforms []string
}

var (
	farmBuildDescription = `Build images on farm nodes, then bundle them into a manifest list`
	buildCommand         = &cobra.Command{
		Use:     "build [options] [CONTEXT]",
		Short:   "Build a container image for multiple architectures",
		Long:    farmBuildDescription,
		RunE:    build,
		Example: "podman farm build [flags] buildContextDirectory",
		Args:    cobra.ExactArgs(1),
	}
	buildOpts = buildOptions{
		buildOptions: common.BuildFlagsWrapper{},
	}
)
// init registers the farm build subcommand and wires up its flags.
func init() {
	registry.Commands = append(registry.Commands, registry.CliCommand{
		Command: buildCommand,
		Parent:  farmCmd,
	})
	flags := buildCommand.Flags()
	flags.SetNormalizeFunc(utils.AliasFlags)

	localFlagName := "local"
	// Default for local is true and hide this flag for the remote use case
	if !registry.IsRemote() {
		flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
	}
	cleanupFlag := "cleanup"
	flags.BoolVar(&buildOpts.buildOptions.Cleanup, cleanupFlag, false, "Remove built images from farm nodes on success")
	platformsFlag := "platforms"
	buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")
	// Reuse the regular build flags, hiding the ones that don't apply to
	// farm builds (isFarmBuild=true).
	common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
}
// build validates the farm build flags, resolves which farm to use, and
// dispatches the build to the farm's nodes, bundling the per-node images into
// a manifest list named by --tag.
func build(cmd *cobra.Command, args []string) error {
	// Return error if any of the hidden flags are used
	for _, f := range common.FarmBuildHiddenFlags {
		if cmd.Flags().Changed(f) {
			return fmt.Errorf("%q is an unsupported flag for podman farm build", f)
		}
	}
	// The manifest list that aggregates the per-node images needs a name.
	if !cmd.Flags().Changed("tag") {
		return errors.New("cannot create manifest list without a name, value for --tag is required")
	}
	opts, err := common.ParseBuildOpts(cmd, args, &buildOpts.buildOptions)
	if err != nil {
		return err
	}
	// Close the logFile if one was created based on the flag
	if opts.LogFileToClose != nil {
		defer opts.LogFileToClose.Close()
	}
	if opts.TmpDirToClose != "" {
		// We had to download the context directory.
		// Delete it later.
		defer func() {
			if err = os.RemoveAll(opts.TmpDirToClose); err != nil {
				logrus.Errorf("Removing temporary directory %q: %v", opts.TmpDirToClose, err)
			}
		}()
	}
	opts.Cleanup = buildOpts.buildOptions.Cleanup
	iidFile, err := cmd.Flags().GetString("iidfile")
	if err != nil {
		return err
	}
	opts.IIDFile = iidFile

	// Use the default farm from the config unless --farm was given on the
	// parent farm command.
	cfg, err := config.ReadCustomConfig()
	if err != nil {
		return err
	}
	defaultFarm := cfg.Farms.Default
	if farmCmd.Flags().Changed("farm") {
		f, err := farmCmd.Flags().GetString("farm")
		if err != nil {
			return err
		}
		defaultFarm = f
	}

	// Only hand the farm a local engine when --local was requested.
	var localEngine entities.ImageEngine
	if buildOpts.local {
		localEngine = registry.ImageEngine()
	}

	ctx := registry.Context()
	farm, err := farm.NewFarm(ctx, defaultFarm, localEngine)
	if err != nil {
		return fmt.Errorf("initializing: %w", err)
	}
	// Decide which builder handles which requested platform.
	schedule, err := farm.Schedule(ctx, buildOpts.platforms)
	if err != nil {
		return fmt.Errorf("scheduling builds: %w", err)
	}
	logrus.Infof("schedule: %v", schedule)

	manifestName := opts.Output
	// Set Output to "" so that the images built on the farm nodes have no name
	opts.Output = ""
	if err = farm.Build(ctx, schedule, *opts, manifestName); err != nil {
		return fmt.Errorf("build: %w", err)
	}
	logrus.Infof("build: ok")
	return nil
}

View File

@ -19,8 +19,7 @@ var (
var ( var (
// Temporary struct to hold cli values. // Temporary struct to hold cli values.
farmOpts = struct { farmOpts = struct {
Farm string Farm string
Local bool
}{} }{}
) )
@ -40,10 +39,4 @@ func init() {
defaultFarm = podmanConfig.ContainersConfDefaultsRO.Farms.Default defaultFarm = podmanConfig.ContainersConfDefaultsRO.Farms.Default
} }
flags.StringVarP(&farmOpts.Farm, farmFlagName, "f", defaultFarm, "Farm to use for builds") flags.StringVarP(&farmOpts.Farm, farmFlagName, "f", defaultFarm, "Farm to use for builds")
localFlagName := "local"
// Default for local is true and hide this flag for the remote use case
if !registry.IsRemote() {
flags.BoolVarP(&farmOpts.Local, localFlagName, "l", true, "Build image on local machine including on farm nodes")
}
} }

View File

@ -74,7 +74,7 @@ func init() {
} }
func buildFlags(cmd *cobra.Command) { func buildFlags(cmd *cobra.Command) {
common.DefineBuildFlags(cmd, &buildOpts) common.DefineBuildFlags(cmd, &buildOpts, false)
} }
// build executes the build command. // build executes the build command.

View File

@ -62,6 +62,7 @@ type HostInfo struct {
SwapFree int64 `json:"swapFree"` SwapFree int64 `json:"swapFree"`
SwapTotal int64 `json:"swapTotal"` SwapTotal int64 `json:"swapTotal"`
Uptime string `json:"uptime"` Uptime string `json:"uptime"`
Variant string `json:"variant"`
Linkmode string `json:"linkmode"` Linkmode string `json:"linkmode"`
} }

View File

@ -16,6 +16,7 @@ import (
"time" "time"
"github.com/containers/buildah" "github.com/containers/buildah"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/util" "github.com/containers/buildah/pkg/util"
"github.com/containers/common/pkg/version" "github.com/containers/common/pkg/version"
"github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/sysregistriesv2"
@ -130,6 +131,11 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
SwapFree: mi.SwapFree, SwapFree: mi.SwapFree,
SwapTotal: mi.SwapTotal, SwapTotal: mi.SwapTotal,
} }
platform := parse.DefaultPlatform()
pArr := strings.Split(platform, "/")
if len(pArr) == 3 {
info.Variant = pArr[2]
}
if err := r.setPlatformHostInfo(&info); err != nil { if err := r.setPlatformHostInfo(&info); err != nil {
return nil, err return nil, err
} }

View File

@ -19,6 +19,7 @@ import (
"github.com/containers/buildah/define" "github.com/containers/buildah/define"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
ldefine "github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/auth" "github.com/containers/podman/v4/pkg/auth"
"github.com/containers/podman/v4/pkg/bindings" "github.com/containers/podman/v4/pkg/bindings"
"github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/domain/entities"
@ -500,6 +501,11 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
} }
} }
saveFormat := ldefine.OCIArchive
if options.OutputFormat == define.Dockerv2ImageManifest {
saveFormat = ldefine.V2s2Archive
}
// build secrets are usually absolute host path or relative to context dir on host // build secrets are usually absolute host path or relative to context dir on host
// in any case move secret to current context and ship the tar. // in any case move secret to current context and ship the tar.
if secrets := options.CommonBuildOpts.Secrets; len(secrets) > 0 { if secrets := options.CommonBuildOpts.Secrets; len(secrets) > 0 {
@ -602,7 +608,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
// even when the server quit but it seems desirable to // even when the server quit but it seems desirable to
// distinguish a proper build from a transient EOF. // distinguish a proper build from a transient EOF.
case <-response.Request.Context().Done(): case <-response.Request.Context().Done():
return &entities.BuildReport{ID: id}, nil return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
default: default:
// non-blocking select // non-blocking select
} }
@ -616,7 +622,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if errors.Is(err, io.EOF) && id != "" { if errors.Is(err, io.EOF) && id != "" {
break break
} }
return &entities.BuildReport{ID: id}, fmt.Errorf("decoding stream: %w", err) return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, fmt.Errorf("decoding stream: %w", err)
} }
switch { switch {
@ -629,12 +635,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
case s.Error != "": case s.Error != "":
// If there's an error, return directly. The stream // If there's an error, return directly. The stream
// will be closed on return. // will be closed on return.
return &entities.BuildReport{ID: id}, errors.New(s.Error) return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New(s.Error)
default: default:
return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input") return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New("failed to parse build results stream, unexpected input")
} }
} }
return &entities.BuildReport{ID: id}, nil return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
} }
func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {

View File

@ -50,6 +50,7 @@ type PodmanConfig struct {
Syslog bool // write logging information to syslog as well as the console Syslog bool // write logging information to syslog as well as the console
Trace bool // Hidden: Trace execution Trace bool // Hidden: Trace execution
URI string // URI to RESTful API Service URI string // URI to RESTful API Service
FarmNodeName string // Name of farm node
Runroot string Runroot string
ImageStore string ImageStore string

View File

@ -40,5 +40,11 @@ type ImageEngine interface { //nolint:interfacebloat
ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) ManifestRemoveDigest(ctx context.Context, names, image string) (string, error)
ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error) ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error)
ManifestPush(ctx context.Context, name, destination string, imagePushOpts ImagePushOptions) (string, error) ManifestPush(ctx context.Context, name, destination string, imagePushOpts ImagePushOptions) (string, error)
ManifestListClear(ctx context.Context, name string) (string, error)
Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error) Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error)
FarmNodeName(ctx context.Context) string
FarmNodeDriver(ctx context.Context) string
FarmNodeInspect(ctx context.Context) (*FarmInspectReport, error)
PullToFile(ctx context.Context, options PullToFileOptions) (string, error)
PullToLocal(ctx context.Context, options PullToLocalOptions) (string, error)
} }

View File

@ -478,3 +478,33 @@ type ImageUnmountReport struct {
Err error Err error
Id string //nolint:revive,stylecheck Id string //nolint:revive,stylecheck
} }
const (
LocalFarmImageBuilderName = "(local)"
LocalFarmImageBuilderDriver = "local"
)
// FarmInspectReport describes the response from farm inspect
type FarmInspectReport struct {
NativePlatforms []string
EmulatedPlatforms []string
OS string
Arch string
Variant string
}
// PullToFileOptions are the options for pulling the images from farm
// nodes into a dir
type PullToFileOptions struct {
ImageID string
SaveFormat string
SaveFile string
}
// PullToLocalOptions are the options for pulling the images from farm
// nodes into containers-storage
type PullToLocalOptions struct {
ImageID string
SaveFormat string
Destination ImageEngine
}

View File

@ -112,6 +112,7 @@ type ContainerCreateResponse struct {
type BuildOptions struct { type BuildOptions struct {
buildahDefine.BuildOptions buildahDefine.BuildOptions
ContainerFiles []string ContainerFiles []string
FarmBuildOptions
// Files that need to be closed after the build // Files that need to be closed after the build
// so need to pass this to the main build functions // so need to pass this to the main build functions
LogFileToClose *os.File LogFileToClose *os.File
@ -122,6 +123,14 @@ type BuildOptions struct {
type BuildReport struct { type BuildReport struct {
// ID of the image. // ID of the image.
ID string ID string
// Format to save the image in
SaveFormat string
}
// FarmBuildOptions describes the options for building container images on farm nodes
type FarmBuildOptions struct {
// Cleanup removes built images from farm nodes on success
Cleanup bool
} }
type IDOrNameResponse struct { type IDOrNameResponse struct {

View File

@ -0,0 +1,120 @@
//go:build !remote
// +build !remote
package abi
import (
"context"
"fmt"
"os"
"strings"
"github.com/containers/buildah/pkg/parse"
lplatform "github.com/containers/common/libimage/platform"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/emulation"
)
// FarmNodeName returns the local engine's name.
func (ir *ImageEngine) FarmNodeName(ctx context.Context) string {
return entities.LocalFarmImageBuilderName
}
// FarmNodeDriver returns a description of the local image builder driver
func (ir *ImageEngine) FarmNodeDriver(ctx context.Context) string {
return entities.LocalFarmImageBuilderDriver
}
// fetchInfo determines the local engine's native OS/arch/variant, the
// normalized native platform string derived from them, and the set of
// additional platforms supported through registered emulation.
func (ir *ImageEngine) fetchInfo(_ context.Context) (os, arch, variant string, nativePlatforms []string, emulatedPlatforms []string, err error) {
	nativePlatform := parse.DefaultPlatform()
	platform := strings.SplitN(nativePlatform, "/", 3)
	switch len(platform) {
	case 0, 1:
		return "", "", "", nil, nil, fmt.Errorf("unparseable default platform %q", nativePlatform)
	case 2:
		os, arch = platform[0], platform[1]
	case 3:
		os, arch, variant = platform[0], platform[1], platform[2]
	}
	os, arch, variant = lplatform.Normalize(os, arch, variant)
	// Rebuild the platform string from the normalized components.
	nativePlatform = os + "/" + arch
	if variant != "" {
		nativePlatform += "/" + variant
	}
	emulatedPlatforms = emulation.Registered()
	return os, arch, variant, []string{nativePlatform}, emulatedPlatforms, nil
}
// FarmNodeInspect returns platform information about the local engine,
// fetching it at most once and caching the result for subsequent calls.
func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspectReport, error) {
	ir.platforms.Do(func() {
		ir.os, ir.arch, ir.variant, ir.nativePlatforms, ir.emulatedPlatforms, ir.platformsErr = ir.fetchInfo(ctx)
	})
	report := entities.FarmInspectReport{
		NativePlatforms:   ir.nativePlatforms,
		EmulatedPlatforms: ir.emulatedPlatforms,
		OS:                ir.os,
		Arch:              ir.arch,
		Variant:           ir.variant,
	}
	return &report, ir.platformsErr
}
// PullToFile saves the image to a file in the requested format, returning a
// string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
	saveOptions := entities.ImageSaveOptions{
		Format: options.SaveFormat,
		Output: options.SaveFile,
	}
	if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
		return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
	}
	// The returned reference has the form "<save-format>:<path>".
	return options.SaveFormat + ":" + options.SaveFile, nil
}
// PullToLocal copies the image into the destination engine given in options,
// returning a string-format reference which can be parsed by
// containers/image.  If the destination already has the image, the copy is
// skipped.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
	destination := options.Destination
	if destination == nil {
		return "", fmt.Errorf("destination not given, cannot pull image %q", options.ImageID)
	}
	// Check if the image is already present at destination
	var br *entities.BoolReport
	br, err = destination.Exists(ctx, options.ImageID)
	if err != nil {
		return "", err
	}
	if br.Value {
		return istorage.Transport.Name() + ":" + options.ImageID, nil
	}
	// Stage the image in a temporary archive, removed on return.
	tempFile, err := os.CreateTemp("", "")
	if err != nil {
		return "", err
	}
	defer os.Remove(tempFile.Name())
	defer tempFile.Close()
	saveOptions := entities.ImageSaveOptions{
		Format: options.SaveFormat,
		Output: tempFile.Name(),
	}
	// Save image built on builder in a temp file
	if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
		return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
	}
	// Load the image saved in tempFile into the local engine
	loadOptions := entities.ImageLoadOptions{
		Input: tempFile.Name(),
	}
	_, err = destination.Load(ctx, loadOptions)
	if err != nil {
		return "", err
	}
	return istorage.Transport.Name() + ":" + options.ImageID, nil
}

View File

@ -15,6 +15,7 @@ import (
"strings" "strings"
"syscall" "syscall"
bdefine "github.com/containers/buildah/define"
"github.com/containers/common/libimage" "github.com/containers/common/libimage"
"github.com/containers/common/libimage/filter" "github.com/containers/common/libimage/filter"
"github.com/containers/common/pkg/config" "github.com/containers/common/pkg/config"
@ -513,7 +514,11 @@ func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &entities.BuildReport{ID: id}, nil saveFormat := define.OCIArchive
if opts.OutputFormat == bdefine.Dockerv2ImageManifest {
saveFormat = define.V2s2Archive
}
return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil
} }
func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) { func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {

View File

@ -392,3 +392,24 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
return manDigest.String(), err return manDigest.String(), err
} }
// ManifestListClear removes every instance from the named manifest list and
// returns the (now-empty) list's image ID.
func (ir *ImageEngine) ManifestListClear(ctx context.Context, name string) (string, error) {
	list, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
	if err != nil {
		return "", err
	}
	data, err := list.Inspect()
	if err != nil {
		return "", err
	}
	for _, m := range data.Manifests {
		if err := list.RemoveInstance(m.Digest); err != nil {
			return "", err
		}
	}
	return list.ID(), nil
}

View File

@ -9,6 +9,7 @@ import (
// Image-related runtime linked against libpod library // Image-related runtime linked against libpod library
type ImageEngine struct { type ImageEngine struct {
Libpod *libpod.Runtime Libpod *libpod.Runtime
FarmNode
} }
// Container-related runtime linked against libpod library // Container-related runtime linked against libpod library
@ -21,4 +22,14 @@ type SystemEngine struct {
Libpod *libpod.Runtime Libpod *libpod.Runtime
} }
type FarmNode struct {
platforms sync.Once
platformsErr error
os string
arch string
variant string
nativePlatforms []string
emulatedPlatforms []string
}
var shutdownSync sync.Once var shutdownSync sync.Once

View File

@ -39,7 +39,7 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
if err != nil { if err != nil {
return nil, fmt.Errorf("%w: %s", err, facts.URI) return nil, fmt.Errorf("%w: %s", err, facts.URI)
} }
return &tunnel.ImageEngine{ClientCtx: ctx}, nil return &tunnel.ImageEngine{ClientCtx: ctx, FarmNode: tunnel.FarmNode{NodeName: facts.FarmNodeName}}, nil
} }
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
} }

View File

@ -18,11 +18,12 @@ var (
connection *context.Context connection *context.Context
) )
func newConnection(uri string, identity string, machine bool) (context.Context, error) { func newConnection(uri string, identity, farmNodeName string, machine bool) (context.Context, error) {
connectionMutex.Lock() connectionMutex.Lock()
defer connectionMutex.Unlock() defer connectionMutex.Unlock()
if connection == nil { // if farmNodeName given, then create a connection with the node so that we can send builds there
if connection == nil || farmNodeName != "" {
ctx, err := bindings.NewConnectionWithIdentity(context.Background(), uri, identity, machine) ctx, err := bindings.NewConnectionWithIdentity(context.Background(), uri, identity, machine)
if err != nil { if err != nil {
return ctx, err return ctx, err
@ -37,7 +38,7 @@ func NewContainerEngine(facts *entities.PodmanConfig) (entities.ContainerEngine,
case entities.ABIMode: case entities.ABIMode:
return nil, fmt.Errorf("direct runtime not supported") return nil, fmt.Errorf("direct runtime not supported")
case entities.TunnelMode: case entities.TunnelMode:
ctx, err := newConnection(facts.URI, facts.Identity, facts.MachineMode) ctx, err := newConnection(facts.URI, facts.Identity, "", facts.MachineMode)
return &tunnel.ContainerEngine{ClientCtx: ctx}, err return &tunnel.ContainerEngine{ClientCtx: ctx}, err
} }
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
@ -49,8 +50,8 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
case entities.ABIMode: case entities.ABIMode:
return nil, fmt.Errorf("direct image runtime not supported") return nil, fmt.Errorf("direct image runtime not supported")
case entities.TunnelMode: case entities.TunnelMode:
ctx, err := newConnection(facts.URI, facts.Identity, facts.MachineMode) ctx, err := newConnection(facts.URI, facts.Identity, facts.FarmNodeName, facts.MachineMode)
return &tunnel.ImageEngine{ClientCtx: ctx}, err return &tunnel.ImageEngine{ClientCtx: ctx, FarmNode: tunnel.FarmNode{NodeName: facts.FarmNodeName}}, err
} }
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
} }

View File

@ -0,0 +1,93 @@
package tunnel
import (
"context"
"errors"
"fmt"
"os"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/bindings/system"
"github.com/containers/podman/v4/pkg/domain/entities"
)
const (
remoteFarmImageBuilderDriver = "podman-remote"
)
// FarmNodeName returns the remote engine's name.
func (ir *ImageEngine) FarmNodeName(ctx context.Context) string {
return ir.NodeName
}
// FarmNodeDriver returns a description of the image builder driver
func (ir *ImageEngine) FarmNodeDriver(ctx context.Context) string {
return remoteFarmImageBuilderDriver
}
// fetchInfo queries the remote engine for its host OS/arch/variant and
// derives its native platform string from them.
func (ir *ImageEngine) fetchInfo(_ context.Context) (os, arch, variant string, nativePlatforms []string, err error) {
	engineInfo, err := system.Info(ir.ClientCtx, &system.InfoOptions{})
	if err != nil {
		return "", "", "", nil, fmt.Errorf("retrieving host info from %q: %w", ir.NodeName, err)
	}
	// The variant segment is only appended when the host reports one.
	nativePlatform := engineInfo.Host.OS + "/" + engineInfo.Host.Arch
	if engineInfo.Host.Variant != "" {
		nativePlatform = nativePlatform + "/" + engineInfo.Host.Variant
	}
	return engineInfo.Host.OS, engineInfo.Host.Arch, engineInfo.Host.Variant, []string{nativePlatform}, nil
}
// FarmNodeInspect returns information about the remote engines in the farm
func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspectReport, error) {
ir.platforms.Do(func() {
ir.os, ir.arch, ir.variant, ir.nativePlatforms, ir.platformsErr = ir.fetchInfo(ctx)
})
return &entities.FarmInspectReport{NativePlatforms: ir.nativePlatforms,
OS: ir.os,
Arch: ir.arch,
Variant: ir.variant}, ir.platformsErr
}
// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
	opts := entities.ImageSaveOptions{
		Format: options.SaveFormat,
		Output: options.SaveFile,
	}
	if err = ir.Save(ctx, options.ImageID, nil, opts); err != nil {
		return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
	}
	return fmt.Sprintf("%s:%s", options.SaveFormat, options.SaveFile), nil
}
// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
	// Check the destination up front so we don't pull the image across the
	// connection only to discover we have nowhere to put it.
	if options.Destination == nil {
		return "", errors.New("internal error: options.Destination not set")
	}
	tempFile, err := os.CreateTemp("", "")
	if err != nil {
		return "", err
	}
	defer os.Remove(tempFile.Name())
	defer tempFile.Close()
	saveOptions := entities.ImageSaveOptions{
		Format: options.SaveFormat,
		Output: tempFile.Name(),
	}
	// Save the image built on the farm node into the temporary archive.
	if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
		return "", fmt.Errorf("saving image %q to temporary file: %w", options.ImageID, err)
	}
	// Load the archived image into the destination (local) engine.
	loadOptions := entities.ImageLoadOptions{
		Input: tempFile.Name(),
	}
	if _, err := options.Destination.Load(ctx, loadOptions); err != nil {
		return "", fmt.Errorf("loading image %q: %w", options.ImageID, err)
	}
	return fmt.Sprintf("%s:%s", istorage.Transport.Name(), options.ImageID), nil
}

View File

@ -9,11 +9,13 @@ import (
"strings" "strings"
"time" "time"
bdefine "github.com/containers/buildah/define"
"github.com/containers/common/libimage/filter" "github.com/containers/common/libimage/filter"
"github.com/containers/common/pkg/config" "github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/ssh" "github.com/containers/common/pkg/ssh"
"github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types" "github.com/containers/image/v5/types"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/images" "github.com/containers/podman/v4/pkg/bindings/images"
"github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/entities/reports" "github.com/containers/podman/v4/pkg/domain/entities/reports"
@ -377,6 +379,10 @@ func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts en
if err != nil { if err != nil {
return nil, err return nil, err
} }
report.SaveFormat = define.OCIArchive
if opts.OutputFormat == bdefine.Dockerv2ImageManifest {
report.SaveFormat = define.V2s2Archive
}
return report, nil return report, nil
} }

View File

@ -157,3 +157,19 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
return digest, err return digest, err
} }
// ManifestListClear removes every instance from the named manifest list on
// the remote engine and returns the list's name.
func (ir *ImageEngine) ManifestListClear(ctx context.Context, name string) (string, error) {
	data, err := manifests.InspectListData(ctx, name, &manifests.InspectOptions{})
	if err != nil {
		return "", err
	}
	for _, m := range data.Manifests {
		_, err := manifests.Remove(ctx, name, m.Digest.String(), &manifests.RemoveOptions{})
		if err != nil {
			return "", err
		}
	}
	return name, nil
}

View File

@ -3,6 +3,7 @@ package tunnel
import ( import (
"context" "context"
"os" "os"
"sync"
"syscall" "syscall"
"github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/libpod/define"
@ -13,6 +14,7 @@ import (
// Image-related runtime using an ssh-tunnel to utilize Podman service // Image-related runtime using an ssh-tunnel to utilize Podman service
type ImageEngine struct { type ImageEngine struct {
ClientCtx context.Context ClientCtx context.Context
FarmNode
} }
// Container-related runtime using an ssh-tunnel to utilize Podman service // Container-related runtime using an ssh-tunnel to utilize Podman service
@ -25,6 +27,16 @@ type SystemEngine struct {
ClientCtx context.Context ClientCtx context.Context
} }
type FarmNode struct {
NodeName string
platforms sync.Once
platformsErr error
os string
arch string
variant string
nativePlatforms []string
}
func remoteProxySignals(ctrID string, killFunc func(string) error) { func remoteProxySignals(ctrID string, killFunc func(string) error) {
sigBuffer := make(chan os.Signal, signal.SignalBufferSize) sigBuffer := make(chan os.Signal, signal.SignalBufferSize)
signal.CatchAll(sigBuffer) signal.CatchAll(sigBuffer)

View File

@ -1,3 +1,6 @@
//go:build !remote
// +build !remote
package emulation package emulation
import ( import (

View File

@ -1,3 +1,6 @@
//go:build !remote
// +build !remote
package emulation package emulation
import ( import (

View File

@ -1,5 +1,5 @@
//go:build !linux //go:build !linux && !remote
// +build !linux // +build !linux,!remote
package emulation package emulation

View File

@ -1,3 +1,6 @@
//go:build !remote
// +build !remote
package emulation package emulation
import ( import (

View File

@ -1,3 +1,6 @@
//go:build !remote
// +build !remote
package emulation package emulation
import "github.com/sirupsen/logrus" import "github.com/sirupsen/logrus"

492
pkg/farm/farm.go Normal file
View File

@ -0,0 +1,492 @@
package farm
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
"github.com/containers/buildah/define"
lplatform "github.com/containers/common/libimage/platform"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
)
// Farm represents a group of connections to builders.
type Farm struct {
name string
localEngine entities.ImageEngine // not nil -> use local engine, too
builders map[string]entities.ImageEngine // name -> builder
}
// Schedule is a description of where and how we'll do builds.
type Schedule struct {
platformBuilders map[string]string // target->connection
}
// newFarmWithBuilders connects, in parallel, to each of the given
// destinations (plus the local engine, when one is supplied) and returns a
// Farm wrapping the resulting image engines.  It fails if any connection
// fails or if no builders end up configured.
func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine) (*Farm, error) {
	farm := &Farm{
		builders:    make(map[string]entities.ImageEngine),
		localEngine: localEngine,
		name:        name,
	}
	var (
		builderMutex sync.Mutex // guards farm.builders across the goroutines below
		builderGroup multierror.Group
	)
	// Set up the remote connections to handle the builds
	for name, dest := range *destinations {
		name, dest := name, dest // capture per-iteration copies for the closure
		builderGroup.Go(func() error {
			fmt.Printf("Connecting to %q\n", name)
			engine, err := infra.NewImageEngine(&entities.PodmanConfig{
				EngineMode:   entities.TunnelMode,
				URI:          dest.URI,
				Identity:     dest.Identity,
				MachineMode:  dest.IsMachine,
				FarmNodeName: name,
			})
			if err != nil {
				return fmt.Errorf("initializing image engine at %q: %w", dest.URI, err)
			}
			defer fmt.Printf("Builder %q ready\n", name)
			builderMutex.Lock()
			defer builderMutex.Unlock()
			farm.builders[name] = engine
			return nil
		})
	}
	// If local=true then use the local machine for builds as well
	if localEngine != nil {
		builderGroup.Go(func() error {
			fmt.Println("Setting up local builder")
			defer fmt.Println("Local builder ready")
			builderMutex.Lock()
			defer builderMutex.Unlock()
			farm.builders[entities.LocalFarmImageBuilderName] = localEngine
			return nil
		})
	}
	// Wait for all connection attempts; any failure aborts farm creation.
	if builderError := builderGroup.Wait(); builderError != nil {
		if err := builderError.ErrorOrNil(); err != nil {
			return nil, err
		}
	}
	if len(farm.builders) > 0 {
		defer fmt.Printf("Farm %q ready\n", farm.name)
		return farm, nil
	}
	return nil, errors.New("no builders configured")
}
// NewFarm looks up the named farm's connection destinations in the user's
// configuration and returns a Farm connected to each of them. A non-nil
// localEngine is added as an extra, local builder.
func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine) (*Farm, error) {
	// Resolve the farm name to the service destinations it contains.
	dests, lookupErr := getFarmDestinations(name)
	if lookupErr != nil {
		return nil, lookupErr
	}
	return newFarmWithBuilders(ctx, name, &dests, localEngine)
}
// Done performs any necessary end-of-process cleanup for the farm's members,
// shutting down each builder's engine. It visits every node (the callback
// never asks to stop early) and returns any accumulated errors.
func (f *Farm) Done(ctx context.Context) error {
	return f.forEach(ctx, func(ctx context.Context, name string, engine entities.ImageEngine) (bool, error) {
		// Shutdown's result is deliberately ignored; cleanup is best-effort.
		engine.Shutdown(ctx)
		return false, nil
	})
}
// Status polls the connections in the farm and returns a map of their
// individual status, along with an error if any are down or otherwise unreachable.
// A node's map entry is nil when it is reachable, non-nil otherwise.
func (f *Farm) Status(ctx context.Context) (map[string]error, error) {
	status := make(map[string]error)
	var (
		statusMutex sync.Mutex // guards status while goroutines write to it
		statusGroup multierror.Group
	)
	// Probe every node concurrently; fetching its config doubles as a ping.
	for _, engine := range f.builders {
		engine := engine // per-iteration copy for the closure
		statusGroup.Go(func() error {
			logrus.Debugf("getting status of %q", engine.FarmNodeName(ctx))
			defer logrus.Debugf("got status of %q", engine.FarmNodeName(ctx))
			_, err := engine.Config(ctx)
			statusMutex.Lock()
			defer statusMutex.Unlock()
			status[engine.FarmNodeName(ctx)] = err
			return err
		})
	}
	statusError := statusGroup.Wait()
	return status, statusError.ErrorOrNil()
}
// forEach invokes fn once per farm node, collecting any errors it returns.
// Iteration ends early when fn's first return value is true; errors from all
// visited nodes are combined into the returned error (nil when there were
// none).
func (f *Farm) forEach(ctx context.Context, fn func(context.Context, string, entities.ImageEngine) (bool, error)) error {
	var errs *multierror.Error
	for nodeName, nodeEngine := range f.builders {
		done, visitErr := fn(ctx, nodeName, nodeEngine)
		if visitErr != nil {
			errs = multierror.Append(errs, fmt.Errorf("%s: %w", nodeEngine.FarmNodeName(ctx), visitErr))
		}
		if done {
			break
		}
	}
	return errs.ErrorOrNil()
}
// NativePlatforms returns a list of the set of platforms for which the farm
// can build images natively, as a sorted, deduplicated list of
// os/arch[/variant] strings.
func (f *Farm) NativePlatforms(ctx context.Context) ([]string, error) {
	nativeMap := make(map[string]struct{})
	var (
		nativeMutex sync.Mutex // guards nativeMap while goroutines insert into it
		nativeGroup multierror.Group
	)
	// Inspect every builder concurrently and merge their native platform sets.
	for _, engine := range f.builders {
		engine := engine // per-iteration copy for the closure
		nativeGroup.Go(func() error {
			logrus.Debugf("getting native platform of %q", engine.FarmNodeName(ctx))
			defer logrus.Debugf("got native platform of %q", engine.FarmNodeName(ctx))
			inspect, err := engine.FarmNodeInspect(ctx)
			if err != nil {
				return err
			}
			nativeMutex.Lock()
			defer nativeMutex.Unlock()
			for _, platform := range inspect.NativePlatforms {
				nativeMap[platform] = struct{}{}
			}
			return nil
		})
	}
	// Group.Wait's result may be a nil *multierror.Error; ErrorOrNil
	// handles that, so a single check suffices.
	if err := nativeGroup.Wait().ErrorOrNil(); err != nil {
		return nil, err
	}
	// Flatten the set into a deterministic, sorted slice.
	platforms := make([]string, 0, len(nativeMap))
	for platform := range nativeMap {
		platforms = append(platforms, platform)
	}
	sort.Strings(platforms)
	return platforms, nil
}
// EmulatedPlatforms returns a list of the set of platforms for which the farm
// can build images with the help of emulation, as a sorted, deduplicated list
// of os/arch[/variant] strings.
func (f *Farm) EmulatedPlatforms(ctx context.Context) ([]string, error) {
	emulatedMap := make(map[string]struct{})
	var (
		emulatedMutex sync.Mutex // guards emulatedMap while goroutines insert into it
		emulatedGroup multierror.Group
	)
	// Inspect every builder concurrently and merge their emulated platform sets.
	for _, engine := range f.builders {
		engine := engine // per-iteration copy for the closure
		emulatedGroup.Go(func() error {
			logrus.Debugf("getting emulated platforms of %q", engine.FarmNodeName(ctx))
			defer logrus.Debugf("got emulated platforms of %q", engine.FarmNodeName(ctx))
			inspect, err := engine.FarmNodeInspect(ctx)
			if err != nil {
				return err
			}
			emulatedMutex.Lock()
			defer emulatedMutex.Unlock()
			for _, platform := range inspect.EmulatedPlatforms {
				emulatedMap[platform] = struct{}{}
			}
			return nil
		})
	}
	// Group.Wait's result may be a nil *multierror.Error; ErrorOrNil
	// handles that, so a single check suffices.
	if err := emulatedGroup.Wait().ErrorOrNil(); err != nil {
		return nil, err
	}
	// Flatten the set into a deterministic, sorted slice.
	platforms := make([]string, 0, len(emulatedMap))
	for platform := range emulatedMap {
		platforms = append(platforms, platform)
	}
	sort.Strings(platforms)
	return platforms, nil
}
// Schedule takes a list of platforms and returns a list of connections which
// can be used to build for those platforms. It always prefers native builders
// over emulated builders, but will assign a builder which can use emulation
// for a platform if no suitable native builder is available.
//
// If platforms is an empty list, all available native platforms will be
// scheduled.
//
// TODO: add (Priority,Weight *int) a la RFC 2782 to destinations that we know
// of, and factor those in when assigning builds to nodes in here.
func (f *Farm) Schedule(ctx context.Context, platforms []string) (Schedule, error) {
	var (
		err       error
		infoGroup multierror.Group
		infoMutex sync.Mutex // guards native/emulated/localPlatform during concurrent inspection
	)
	// If we weren't given a list of target platforms, generate one.
	if len(platforms) == 0 {
		platforms, err = f.NativePlatforms(ctx)
		if err != nil {
			return Schedule{}, fmt.Errorf("reading list of available native platforms: %w", err)
		}
	}
	platformBuilders := make(map[string]string)
	native := make(map[string]string)   // platform -> first builder that can build it natively
	emulated := make(map[string]string) // platform -> first builder that can emulate it
	var localPlatform string
	// Make notes of which platforms we can build for natively, and which
	// ones we can build for using emulation.
	for name, engine := range f.builders {
		name, engine := name, engine // per-iteration copies for the closure
		infoGroup.Go(func() error {
			inspect, err := engine.FarmNodeInspect(ctx)
			if err != nil {
				return err
			}
			infoMutex.Lock()
			defer infoMutex.Unlock()
			for _, n := range inspect.NativePlatforms {
				// First builder to claim a platform keeps it.
				if _, assigned := native[n]; !assigned {
					native[n] = name
				}
				// NOTE(review): if the local node reports several native
				// platforms, localPlatform ends up as the last one listed —
				// confirm that's the intended choice.
				if name == entities.LocalFarmImageBuilderName {
					localPlatform = n
				}
			}
			for _, e := range inspect.EmulatedPlatforms {
				if _, assigned := emulated[e]; !assigned {
					emulated[e] = name
				}
			}
			return nil
		})
	}
	merr := infoGroup.Wait()
	if merr != nil {
		if err := merr.ErrorOrNil(); err != nil {
			return Schedule{}, err
		}
	}
	// Assign a build to the first node that could build it natively, and
	// if there isn't one, the first one that can build it with the help of
	// emulation, and if there aren't any, error out.
	for _, platform := range platforms {
		if builder, ok := native[platform]; ok {
			platformBuilders[platform] = builder
		} else if builder, ok := emulated[platform]; ok {
			platformBuilders[platform] = builder
		} else {
			return Schedule{}, fmt.Errorf("no builder capable of building for platform %q available", platform)
		}
	}
	// If local is set, prioritize building on local
	if localPlatform != "" {
		platformBuilders[localPlatform] = entities.LocalFarmImageBuilderName
	}
	schedule := Schedule{
		platformBuilders: platformBuilders,
	}
	return schedule, nil
}
// Build runs a build using the specified targetplatform:service map. If all
// builds succeed, it copies the resulting images from the remote hosts to the
// local service and builds a manifest list with the specified reference name.
// When reference has a "dir:" prefix, or there is no local engine, the list
// and images are written to a directory instead of local storage.
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string) error {
	switch options.OutputFormat {
	default:
		return fmt.Errorf("unknown output format %q requested", options.OutputFormat)
	case "", define.OCIv1ImageManifest:
		options.OutputFormat = define.OCIv1ImageManifest
	case define.Dockerv2ImageManifest:
		// accepted as-is
	}
	// Build the list of jobs, one per target platform. The map is keyed by
	// platform rather than builder name: a single builder may be scheduled
	// for more than one platform (e.g. one natively, another via emulation),
	// and keying by builder name would silently overwrite all but the last
	// of its jobs. The map is fully populated before any goroutine starts
	// and only read afterwards, so no locking is needed.
	type job struct {
		platform string
		os       string
		arch     string
		variant  string
		builder  entities.ImageEngine
	}
	jobs := make(map[string]job, len(schedule.platformBuilders))
	for platform, builderName := range schedule.platformBuilders { // prepare to build
		builder, ok := f.builders[builderName]
		if !ok {
			return fmt.Errorf("unknown builder %q", builderName)
		}
		// Split "os/arch/variant" into its parts; trailing parts are optional.
		var rawOS, rawArch, rawVariant string
		p := strings.Split(platform, "/")
		if len(p) > 0 && p[0] != "" {
			rawOS = p[0]
		}
		if len(p) > 1 {
			rawArch = p[1]
		}
		if len(p) > 2 {
			rawVariant = p[2]
		}
		os, arch, variant := lplatform.Normalize(rawOS, rawArch, rawVariant)
		jobs[platform] = job{
			platform: platform,
			os:       os,
			arch:     arch,
			variant:  variant,
			builder:  builder,
		}
	}
	// Decide where the final result will be stored.
	var (
		manifestListBuilder listBuilder
		err                 error
	)
	listBuilderOptions := listBuilderOptions{
		cleanup: options.Cleanup,
		iidFile: options.IIDFile,
	}
	if strings.HasPrefix(reference, "dir:") || f.localEngine == nil {
		location := strings.TrimPrefix(reference, "dir:")
		manifestListBuilder, err = newFileManifestListBuilder(location, listBuilderOptions)
		if err != nil {
			return fmt.Errorf("preparing to build list: %w", err)
		}
	} else {
		manifestListBuilder = newLocalManifestListBuilder(reference, f.localEngine, listBuilderOptions)
	}
	// Start builds in parallel and wait for them all to finish.
	var (
		buildResults sync.Map // platform -> buildResult
		buildGroup   multierror.Group
	)
	type buildResult struct {
		report  entities.BuildReport
		builder entities.ImageEngine
	}
	for platform, builderName := range schedule.platformBuilders {
		platform, builderName := platform, builderName // per-iteration copies for the closures
		outReader, outWriter := io.Pipe()
		errReader, errWriter := io.Pipe()
		// Relay the builder's stdout, prefixing each line with the platform
		// and node that produced it.
		go func() {
			defer outReader.Close()
			reader := bufio.NewReader(outReader)
			writer := options.Out
			if writer == nil {
				writer = os.Stdout
			}
			line, err := reader.ReadString('\n')
			for err == nil {
				line = strings.TrimSuffix(line, "\n")
				fmt.Fprintf(writer, "[%s@%s] %s\n", platform, builderName, line)
				line, err = reader.ReadString('\n')
			}
		}()
		// Same relay for the builder's stderr.
		go func() {
			defer errReader.Close()
			reader := bufio.NewReader(errReader)
			writer := options.Err
			if writer == nil {
				writer = os.Stderr
			}
			line, err := reader.ReadString('\n')
			for err == nil {
				line = strings.TrimSuffix(line, "\n")
				fmt.Fprintf(writer, "[%s@%s] %s\n", platform, builderName, line)
				line, err = reader.ReadString('\n')
			}
		}()
		buildGroup.Go(func() error {
			// Closing the pipes terminates the relay goroutines above.
			defer outWriter.Close()
			defer errWriter.Close()
			j, ok := jobs[platform]
			if !ok {
				return fmt.Errorf("unknown job for %q (shouldn't happen)", platform)
			}
			// Copy the options so each build can have its own platform and
			// output streams without racing the other builds.
			buildOptions := options
			buildOptions.Platforms = []struct{ OS, Arch, Variant string }{{j.os, j.arch, j.variant}}
			buildOptions.Out = outWriter
			buildOptions.Err = errWriter
			fmt.Printf("Starting build for %v at %q\n", buildOptions.Platforms, builderName)
			buildReport, err := j.builder.Build(ctx, options.ContainerFiles, buildOptions)
			if err != nil {
				return fmt.Errorf("building for %q on %q: %w", j.platform, builderName, err)
			}
			fmt.Printf("finished build for %v at %q: built %s\n", buildOptions.Platforms, builderName, buildReport.ID)
			buildResults.Store(platform, buildResult{
				report:  *buildReport,
				builder: j.builder,
			})
			return nil
		})
	}
	if err := buildGroup.Wait().ErrorOrNil(); err != nil {
		return fmt.Errorf("building: %w", err)
	}
	// Assemble the final result.
	perArchBuilds := make(map[entities.BuildReport]entities.ImageEngine)
	buildResults.Range(func(k, v any) bool {
		result, ok := v.(buildResult)
		if !ok {
			fmt.Fprintf(os.Stderr, "report %v not a build result?", v)
			return false
		}
		perArchBuilds[result.report] = result.builder
		return true
	})
	location, err := manifestListBuilder.build(ctx, perArchBuilds)
	if err != nil {
		return err
	}
	fmt.Printf("Saved list to %q\n", location)
	return nil
}
// getFarmDestinations returns the service destination for every connection in
// the named farm, as read from the user's configuration. An empty name
// returns all known service destinations. Unknown farm names and connections
// without a matching service destination are reported as errors rather than
// silently producing empty or zero-valued entries.
func getFarmDestinations(name string) (map[string]config.Destination, error) {
	dest := make(map[string]config.Destination)
	cfg, err := config.ReadCustomConfig()
	if err != nil {
		return dest, err
	}
	// If no farm name is given, then grab all the service destinations available
	if name == "" {
		return cfg.Engine.ServiceDestinations, nil
	}
	connections, ok := cfg.Farms.List[name]
	if !ok {
		return nil, fmt.Errorf("farm %q not found in configuration", name)
	}
	// Go through the connections in the farm and get their destination
	for _, c := range connections {
		d, ok := cfg.Engine.ServiceDestinations[c]
		if !ok {
			return nil, fmt.Errorf("connection %q in farm %q has no service destination", c, name)
		}
		dest[c] = d
	}
	return dest, nil
}

297
pkg/farm/list_builder.go Normal file
View File

@ -0,0 +1,297 @@
package farm
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
lmanifests "github.com/containers/common/libimage/manifests"
"github.com/containers/common/pkg/supplemented"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/hashicorp/go-multierror"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// listBuilder assembles the images described by a set of per-node build
// reports into a manifest list at some destination, returning the location
// of the finished list.
type listBuilder interface {
	build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error)
}
// listBuilderOptions carries the settings shared by the manifest list builders.
type listBuilderOptions struct {
	cleanup bool   // remove per-node intermediate images after assembling the list
	iidFile string // if set, write the finished list's image ID to this file
}
// listLocal builds a manifest list in local container storage.
type listLocal struct {
	listName    string               // name the finished manifest list will have
	localEngine entities.ImageEngine // engine backing local storage
	options     listBuilderOptions
}
// newLocalManifestListBuilder returns a manifest list builder which saves a
// manifest list and its images to local container storage under listName.
func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) listBuilder {
	builder := listLocal{
		listName:    listName,
		localEngine: localEngine,
		options:     options,
	}
	return &builder
}
// Build retrieves images from the build reports and assembles them into a
// manifest list in local container storage, returning the list's name. The
// images are pulled from their builders in parallel; when the cleanup option
// is set, the intermediate copies on the remote nodes are removed afterwards.
func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
	// Named "list" to avoid shadowing the imported "manifest" package.
	list := l.listName
	exists, err := l.localEngine.ManifestExists(ctx, l.listName)
	if err != nil {
		return "", err
	}
	// Create list if it doesn't exist
	if !exists.Value {
		list, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{})
		if err != nil {
			return "", fmt.Errorf("creating manifest list %q: %w", l.listName, err)
		}
	}
	// Pull the images into local storage
	var (
		pullGroup multierror.Group
		refsMutex sync.Mutex // guards refs while goroutines append to it
	)
	refs := []string{}
	for image, engine := range images {
		image, engine := image, engine // per-iteration copies for the closure
		pullOptions := entities.PullToLocalOptions{
			ImageID:     image.ID,
			SaveFormat:  image.SaveFormat,
			Destination: l.localEngine,
		}
		pullGroup.Go(func() error {
			logrus.Infof("copying image %s", image.ID)
			defer logrus.Infof("copied image %s", image.ID)
			ref, err := engine.PullToLocal(ctx, pullOptions)
			if err != nil {
				return fmt.Errorf("pulling image %q to local storage: %w", image, err)
			}
			refsMutex.Lock()
			defer refsMutex.Unlock()
			refs = append(refs, ref)
			return nil
		})
	}
	pullErrors := pullGroup.Wait()
	err = pullErrors.ErrorOrNil()
	if err != nil {
		return "", fmt.Errorf("building: %w", err)
	}
	// Optionally remove the intermediate images from the remote nodes.
	if l.options.cleanup {
		var rmGroup multierror.Group
		for image, engine := range images {
			// The local copy is the one we keep; don't remove it.
			if engine.FarmNodeName(ctx) == entities.LocalFarmImageBuilderName {
				continue
			}
			image, engine := image, engine // per-iteration copies for the closure
			rmGroup.Go(func() error {
				_, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
				if len(err) > 0 {
					return err[0]
				}
				return nil
			})
		}
		rmErrors := rmGroup.Wait()
		if rmErrors != nil {
			if err = rmErrors.ErrorOrNil(); err != nil {
				return "", fmt.Errorf("removing intermediate images: %w", err)
			}
		}
	}
	// Clear the list in the event it already existed
	if exists.Value {
		if _, err = l.localEngine.ManifestListClear(ctx, list); err != nil {
			return "", fmt.Errorf("clearing list %q: %w", list, err)
		}
	}
	// Add the images to the list
	listID, err := l.localEngine.ManifestAdd(ctx, list, refs, entities.ManifestAddOptions{})
	if err != nil {
		return "", fmt.Errorf("adding images %q to list: %w", refs, err)
	}
	// Write the manifest list's ID file if we're expected to
	if l.options.iidFile != "" {
		if err := os.WriteFile(l.options.iidFile, []byte("sha256:"+listID), 0o644); err != nil {
			return "", err
		}
	}
	return l.listName, nil
}
// listFiles builds a manifest list in a directory using the non-standard
// dir: format.
type listFiles struct {
	directory string // destination directory for the list and its images
	options   listBuilderOptions
}
// newFileManifestListBuilder returns a manifest list builder which saves a
// manifest list and its images to the given directory in the non-standard
// dir: format. The iidFile option is rejected because the dir: format has no
// image ID to record.
func newFileManifestListBuilder(directory string, options listBuilderOptions) (listBuilder, error) {
	if options.iidFile != "" {
		return nil, fmt.Errorf("saving to dir: format doesn't use image IDs, --iidfile not supported")
	}
	builder := listFiles{
		directory: directory,
		options:   options,
	}
	return &builder, nil
}
// Build retrieves images from the build reports and assembles them into a
// manifest list in the configured directory, staging everything in a
// temporary dir: image first and copying it to the final location in one
// pass. Returns the "dir:<directory>" location of the finished list.
func (m *listFiles) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
	// Default to OCI media types; switched to Docker schema 2 types below if
	// any image was saved in that format.
	listFormat := v1.MediaTypeImageIndex
	imageFormat := v1.MediaTypeImageManifest
	tempDir, err := os.MkdirTemp("", "")
	if err != nil {
		return "", err
	}
	defer os.RemoveAll(tempDir)
	name := fmt.Sprintf("dir:%s", tempDir)
	tempRef, err := alltransports.ParseImageName(name)
	if err != nil {
		return "", fmt.Errorf("parsing temporary image ref %q: %w", name, err)
	}
	if err := os.MkdirAll(m.directory, 0o755); err != nil {
		return "", err
	}
	output, err := alltransports.ParseImageName("dir:" + m.directory)
	if err != nil {
		return "", fmt.Errorf("parsing output directory ref %q: %w", "dir:"+m.directory, err)
	}
	// Pull the images into the temporary directory
	var (
		pullGroup  multierror.Group
		pullErrors *multierror.Error
		refsMutex  sync.Mutex // guards refs while goroutines insert into it
	)
	refs := make(map[entities.BuildReport]types.ImageReference)
	for image, engine := range images {
		image, engine := image, engine // per-iteration copies for the closure
		tempFile, err := os.CreateTemp(tempDir, "archive-*.tar")
		if err != nil {
			// Make sure any already-started pulls finish before we return.
			// NOTE(review): pullErrors is still nil when read just below
			// (the deferred Wait assigns it only at function return), so
			// perr appears to always be nil here — confirm intent.
			defer func() {
				pullErrors = pullGroup.Wait()
			}()
			perr := pullErrors.ErrorOrNil()
			if perr != nil {
				return "", perr
			}
			return "", err
		}
		// Runs at function return, not per-iteration; fine for a small
		// number of images.
		defer tempFile.Close()
		pullGroup.Go(func() error {
			logrus.Infof("copying image %s", image.ID)
			defer logrus.Infof("copied image %s", image.ID)
			pullOptions := entities.PullToFileOptions{
				ImageID:    image.ID,
				SaveFormat: image.SaveFormat,
				SaveFile:   tempFile.Name(),
			}
			// NOTE(review): these writes to listFormat/imageFormat happen
			// concurrently from multiple goroutines without holding
			// refsMutex — looks like a data race; confirm and guard.
			if image.SaveFormat == manifest.DockerV2Schema2MediaType {
				listFormat = manifest.DockerV2ListMediaType
				imageFormat = manifest.DockerV2Schema2MediaType
			}
			reference, err := engine.PullToFile(ctx, pullOptions)
			if err != nil {
				return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
			}
			ref, err := alltransports.ParseImageName(reference)
			if err != nil {
				return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
			}
			refsMutex.Lock()
			defer refsMutex.Unlock()
			refs[image] = ref
			return nil
		})
	}
	pullErrors = pullGroup.Wait()
	err = pullErrors.ErrorOrNil()
	if err != nil {
		return "", fmt.Errorf("building: %w", err)
	}
	// Optionally remove the intermediate images from the builder nodes.
	if m.options.cleanup {
		var rmGroup multierror.Group
		for image, engine := range images {
			image, engine := image, engine // per-iteration copies for the closure
			rmGroup.Go(func() error {
				_, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
				if len(err) > 0 {
					return err[0]
				}
				return nil
			})
		}
		rmErrors := rmGroup.Wait()
		if rmErrors != nil {
			if err = rmErrors.ErrorOrNil(); err != nil {
				return "", fmt.Errorf("removing intermediate images: %w", err)
			}
		}
	}
	supplemental := []types.ImageReference{}
	var sys types.SystemContext
	// Create a manifest list
	list := lmanifests.Create()
	// Add the images to the list
	for image, ref := range refs {
		if _, err = list.Add(ctx, &sys, ref, true); err != nil {
			return "", fmt.Errorf("adding image %q to list: %w", image.ID, err)
		}
		supplemental = append(supplemental, ref)
	}
	// Save the list to the temporary directory to be the main manifest
	listBytes, err := list.Serialize(listFormat)
	if err != nil {
		return "", fmt.Errorf("serializing manifest list: %w", err)
	}
	if err = os.WriteFile(filepath.Join(tempDir, "manifest.json"), listBytes, fs.FileMode(0o600)); err != nil {
		return "", fmt.Errorf("writing temporary manifest list: %w", err)
	}
	// Now copy everything to the final dir: location
	defaultPolicy, err := signature.DefaultPolicy(&sys)
	if err != nil {
		return "", err
	}
	policyContext, err := signature.NewPolicyContext(defaultPolicy)
	if err != nil {
		return "", err
	}
	// The temporary list plus the per-image archives form one supplemented
	// source for a single copy into the output directory.
	input := supplemented.Reference(tempRef, supplemental, cp.CopyAllImages, nil)
	copyOptions := cp.Options{
		ForceManifestMIMEType: imageFormat,
		ImageListSelection:    cp.CopyAllImages,
	}
	_, err = cp.Image(ctx, policyContext, output, input, &copyOptions)
	if err != nil {
		return "", fmt.Errorf("copying images to dir:%q: %w", m.directory, err)
	}
	return "dir:" + m.directory, nil
}