Mirror of https://github.com/containers/podman.git (synced 2025-05-21 00:56:36 +08:00)
Added optional container restore statistics
This adds the parameter '--print-stats' to 'podman container restore'. With
'--print-stats' Podman will measure how long Podman itself, the OCI runtime
and CRIU require to restore a checkpoint, and print out this information.
CRIU already creates process restore statistics, which are simply read in
addition to the added measurements.

Instead of just printing out the ID of the restored container, Podman will
now print out JSON:

# podman container restore --latest --print-stats
{
    "podman_restore_duration": 305871,
    "container_statistics": [
        {
            "Id": "47b02e1d474b5d5fe917825e91ac653efa757c91e5a81a368d771a78f6b5ed20",
            "runtime_restore_duration": 140614,
            "criu_statistics": {
                "forking_time": 5,
                "restore_time": 67672,
                "pages_restored": 14
            }
        }
    ]
}

'podman_restore_duration' is the number of microseconds Podman required to
restore the checkpoint. 'runtime_restore_duration' is the time the OCI
runtime needed to restore that specific container. Each container also
includes 'criu_statistics', the timing information collected by CRIU.

Signed-off-by: Adrian Reber <areber@redhat.com>
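For scripting against the new output, the JSON printed by '--print-stats' can be decoded with a few matching Go types. The sketch below is illustrative only: the type names (restoreStats, containerStats, criuStats) are made up for this example and mirror just the fields shown in the sample output above.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// criuStats mirrors the "criu_statistics" object in the example above.
type criuStats struct {
	ForkingTime   uint32 `json:"forking_time"`
	RestoreTime   uint32 `json:"restore_time"`
	PagesRestored uint64 `json:"pages_restored"`
}

// containerStats mirrors one entry of "container_statistics".
type containerStats struct {
	ID                     string     `json:"Id"`
	RuntimeRestoreDuration int64      `json:"runtime_restore_duration"`
	CRIUStatistics         *criuStats `json:"criu_statistics"`
}

// restoreStats mirrors the top-level object printed by --print-stats.
type restoreStats struct {
	PodmanRestoreDuration int64             `json:"podman_restore_duration"`
	ContainerStatistics   []*containerStats `json:"container_statistics"`
}

func main() {
	// Read the JSON produced by the command from stdin, e.g.:
	//   podman container restore --latest --print-stats | ./restore-stats
	var stats restoreStats
	if err := json.NewDecoder(os.Stdin).Decode(&stats); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("podman: %d us\n", stats.PodmanRestoreDuration)
	for _, c := range stats.ContainerStatistics {
		fmt.Printf("%s: runtime %d us, criu restore %d us\n",
			c.ID, c.RuntimeRestoreDuration, c.CRIUStatistics.RestoreTime)
	}
}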
@@ -3,6 +3,7 @@ package containers
 import (
     "context"
     "fmt"
+    "time"

     "github.com/containers/common/pkg/completion"
     "github.com/containers/podman/v3/cmd/podman/common"
@@ -39,6 +40,11 @@ var (

 var restoreOptions entities.RestoreOptions

+type restoreStatistics struct {
+    PodmanDuration      int64                     `json:"podman_restore_duration"`
+    ContainerStatistics []*entities.RestoreReport `json:"container_statistics"`
+}
+
 func init() {
     registry.Commands = append(registry.Commands, registry.CliCommand{
         Command: restoreCommand,
@@ -75,11 +81,19 @@ func init() {
     flags.StringVar(&restoreOptions.Pod, "pod", "", "Restore container into existing Pod (only works with --import)")
     _ = restoreCommand.RegisterFlagCompletionFunc("pod", common.AutocompletePodsRunning)

+    flags.BoolVar(
+        &restoreOptions.PrintStats,
+        "print-stats",
+        false,
+        "Display restore statistics",
+    )
+
     validate.AddLatestFlag(restoreCommand, &restoreOptions.Latest)
 }

 func restore(cmd *cobra.Command, args []string) error {
     var errs utils.OutputErrors
+    podmanStart := time.Now()
     if rootless.IsRootless() {
         return errors.New("restoring a container requires root")
     }
@@ -132,12 +146,30 @@ func restore(cmd *cobra.Command, args []string) error {
     if err != nil {
         return err
     }
+    podmanFinished := time.Now()
+
+    var statistics restoreStatistics
+
     for _, r := range responses {
         if r.Err == nil {
+            if restoreOptions.PrintStats {
+                statistics.ContainerStatistics = append(statistics.ContainerStatistics, r)
+            } else {
                 fmt.Println(r.Id)
+            }
         } else {
             errs = append(errs, r.Err)
         }
     }
+
+    if restoreOptions.PrintStats {
+        statistics.PodmanDuration = podmanFinished.Sub(podmanStart).Microseconds()
+        j, err := json.MarshalIndent(statistics, "", " ")
+        if err != nil {
+            return err
+        }
+        fmt.Println(string(j))
+    }
+
     return errs.PrintErrors()
 }
@@ -832,7 +832,11 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO
 }

 // Restore restores a container
-func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) error {
+// The return values *define.CRIUCheckpointRestoreStatistics and int64 (time
+// the runtime needs to restore the container) are only set if
+// options.PrintStats is set to true. Not setting options.PrintStats to true
+// will return nil and 0.
+func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) {
     if options.Pod == "" {
         logrus.Debugf("Trying to restore container %s", c.ID())
     } else {
@@ -843,7 +847,7 @@ func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOpti
         defer c.lock.Unlock()

         if err := c.syncContainer(); err != nil {
-            return err
+            return nil, 0, err
         }
     }
     defer c.newContainerEvent(events.Restore)
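As a usage note on the new signature: a caller is expected to ignore the extra return values unless it set options.PrintStats, since they are nil and 0 otherwise. The helper below is a hypothetical sketch of that convention (restoreAndLog is not part of this change), written against the libpod API as it appears in this diff.

package restoreutil

import (
	"context"

	"github.com/containers/podman/v3/libpod"
	"github.com/sirupsen/logrus"
)

// restoreAndLog is a hypothetical helper: it restores a container and, only
// when the caller asked for statistics, logs the values that Restore now
// returns. Without PrintStats, criuStats is nil and runtimeDuration is 0.
func restoreAndLog(ctx context.Context, ctr *libpod.Container, options libpod.ContainerCheckpointOptions) error {
	criuStats, runtimeDuration, err := ctr.Restore(ctx, options)
	if err != nil {
		return err
	}
	if options.PrintStats && criuStats != nil {
		logrus.Infof("container %s: runtime restore %d us, CRIU restore time %d",
			ctr.ID(), runtimeDuration, criuStats.RestoreTime)
	}
	return nil
}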
@@ -1089,7 +1089,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
     }

     // With the spec complete, do an OCI create
-    if err := c.ociRuntime.CreateContainer(c, nil); err != nil {
+    if _, err = c.ociRuntime.CreateContainer(c, nil); err != nil {
         // Fedora 31 is carrying a patch to display improved error
         // messages to better handle the V2 transition. This is NOT
         // upstream in any OCI runtime.
@@ -1264,7 +1264,7 @@ func (c *Container) importPreCheckpoint(input string) error {
     return nil
 }

-func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (retErr error) {
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (criuStatistics *define.CRIUCheckpointRestoreStatistics, runtimeRestoreDuration int64, retErr error) {
     minCriuVersion := func() int {
         if options.Pod == "" {
             return criu.MinCriuVersion
@@ -1272,37 +1272,37 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         return criu.PodCriuVersion
     }()
     if err := c.checkpointRestoreSupported(minCriuVersion); err != nil {
-        return err
+        return nil, 0, err
     }

     if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) {
-        return errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
+        return nil, 0, errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
     }

     if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
-        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
+        return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
     }

     if options.ImportPrevious != "" {
         if err := c.importPreCheckpoint(options.ImportPrevious); err != nil {
-            return err
+            return nil, 0, err
         }
     }

     if options.TargetFile != "" {
         if err := c.importCheckpoint(options.TargetFile); err != nil {
-            return err
+            return nil, 0, err
         }
     }

     // Let's try to stat() CRIU's inventory file. If it does not exist, it makes
     // no sense to try a restore. This is a minimal check if a checkpoint exist.
     if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
-        return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
+        return nil, 0, errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
     }

     if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
-        return err
+        return nil, 0, err
     }

     // If a container is restored multiple times from an exported checkpoint with
@@ -1339,7 +1339,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     // container with the same networks settings as during checkpointing.
     aliases, err := c.GetAllNetworkAliases()
     if err != nil {
-        return err
+        return nil, 0, err
     }
     netOpts := make(map[string]types.PerNetworkOptions, len(netStatus))
     for network, status := range netStatus {
@@ -1364,7 +1364,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         if perNetOpts.InterfaceName == "" {
             eth, exists := c.state.NetInterfaceDescriptions.getInterfaceByName(network)
             if !exists {
-                return errors.Errorf("no network interface name for container %s on network %s", c.config.ID, network)
+                return nil, 0, errors.Errorf("no network interface name for container %s on network %s", c.config.ID, network)
             }
             perNetOpts.InterfaceName = eth
         }
@@ -1382,7 +1382,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     }()

     if err := c.prepare(); err != nil {
-        return err
+        return nil, 0, err
     }

     // Read config
@@ -1391,7 +1391,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     g, err := generate.NewFromFile(jsonPath)
     if err != nil {
         logrus.Debugf("generate.NewFromFile failed with %v", err)
-        return err
+        return nil, 0, err
     }

     // Restoring from an import means that we are doing migration
@@ -1407,7 +1407,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         }

         if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), netNSPath); err != nil {
-            return err
+            return nil, 0, err
         }
     }

@@ -1416,23 +1416,23 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         // the ones from the infrastructure container.
         pod, err := c.runtime.LookupPod(options.Pod)
         if err != nil {
-            return errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
+            return nil, 0, errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
         }

         infraContainer, err := pod.InfraContainer()
         if err != nil {
-            return errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
+            return nil, 0, errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
         }

         infraContainer.lock.Lock()
         if err := infraContainer.syncContainer(); err != nil {
             infraContainer.lock.Unlock()
-            return errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
+            return nil, 0, errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
         }
         if infraContainer.state.State != define.ContainerStateRunning {
             if err := infraContainer.initAndStart(ctx); err != nil {
                 infraContainer.lock.Unlock()
-                return errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
+                return nil, 0, errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
             }
         }
         infraContainer.lock.Unlock()
@@ -1440,56 +1440,56 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         if c.config.IPCNsCtr != "" {
             nsPath, err := infraContainer.namespacePath(IPCNS)
             if err != nil {
-                return errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
+                return nil, 0, errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
             }
             if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
-                return err
+                return nil, 0, err
             }
         }

         if c.config.NetNsCtr != "" {
             nsPath, err := infraContainer.namespacePath(NetNS)
             if err != nil {
-                return errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
+                return nil, 0, errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
             }
             if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
-                return err
+                return nil, 0, err
             }
         }

         if c.config.PIDNsCtr != "" {
             nsPath, err := infraContainer.namespacePath(PIDNS)
             if err != nil {
-                return errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
+                return nil, 0, errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
             }
             if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil {
-                return err
+                return nil, 0, err
             }
         }

         if c.config.UTSNsCtr != "" {
             nsPath, err := infraContainer.namespacePath(UTSNS)
             if err != nil {
-                return errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
+                return nil, 0, errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
             }
             if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil {
-                return err
+                return nil, 0, err
             }
         }

         if c.config.CgroupNsCtr != "" {
             nsPath, err := infraContainer.namespacePath(CgroupNS)
             if err != nil {
-                return errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
+                return nil, 0, errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
             }
             if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil {
-                return err
+                return nil, 0, err
             }
         }
     }

     if err := c.makeBindMounts(); err != nil {
-        return err
+        return nil, 0, err
     }

     if options.TargetFile != "" {
@@ -1511,12 +1511,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti

     // Cleanup for a working restore.
     if err := c.removeConmonFiles(); err != nil {
-        return err
+        return nil, 0, err
     }

     // Save the OCI spec to disk
     if err := c.saveSpec(g.Config); err != nil {
-        return err
+        return nil, 0, err
     }

     // When restoring from an imported archive, allow restoring the content of volumes.
@@ -1527,24 +1527,24 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti

             volumeFile, err := os.Open(volumeFilePath)
             if err != nil {
-                return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
+                return nil, 0, errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
             }
             defer volumeFile.Close()

             volume, err := c.runtime.GetVolume(v.Name)
             if err != nil {
-                return errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
+                return nil, 0, errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
             }

             mountPoint, err := volume.MountPoint()
             if err != nil {
-                return err
+                return nil, 0, err
             }
             if mountPoint == "" {
-                return errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
+                return nil, 0, errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
             }
             if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
-                return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
+                return nil, 0, errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
             }
         }
     }
@@ -1552,16 +1552,43 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     // Before actually restarting the container, apply the root file-system changes
     if !options.IgnoreRootfs {
         if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
-            return err
+            return nil, 0, err
         }

         if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
-            return err
+            return nil, 0, err
         }
     }

-    if err := c.ociRuntime.CreateContainer(c, &options); err != nil {
-        return err
+    runtimeRestoreDuration, err = c.ociRuntime.CreateContainer(c, &options)
+    if err != nil {
+        return nil, 0, err
     }

+    criuStatistics, err = func() (*define.CRIUCheckpointRestoreStatistics, error) {
+        if !options.PrintStats {
+            return nil, nil
+        }
+        statsDirectory, err := os.Open(c.bundlePath())
+        if err != nil {
+            return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+        }
+
+        restoreStatistics, err := stats.CriuGetRestoreStats(statsDirectory)
+        if err != nil {
+            return nil, errors.Wrap(err, "Displaying restore statistics not possible")
+        }
+
+        return &define.CRIUCheckpointRestoreStatistics{
+            PagesCompared:   restoreStatistics.GetPagesCompared(),
+            PagesSkippedCow: restoreStatistics.GetPagesSkippedCow(),
+            ForkingTime:     restoreStatistics.GetForkingTime(),
+            RestoreTime:     restoreStatistics.GetRestoreTime(),
+            PagesRestored:   restoreStatistics.GetPagesRestored(),
+        }, nil
+    }()
+    if err != nil {
+        return nil, 0, err
+    }
+
     logrus.Debugf("Restored container %s", c.ID())
@@ -1600,7 +1627,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         }
     }

-    return c.save()
+    return criuStatistics, runtimeRestoreDuration, c.save()
 }

 // Retrieves a container's "root" net namespace container dependency.
@@ -23,7 +23,10 @@ type OCIRuntime interface {
     Path() string

     // CreateContainer creates the container in the OCI runtime.
-    CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error
+    // The returned int64 contains the microseconds needed to restore
+    // the given container if it is a restore and if restoreOptions.PrintStats
+    // is true. In all other cases the returned int64 is 0.
+    CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error)
     // UpdateContainerStatus updates the status of the given container.
     UpdateContainerStatus(ctr *Container) error
     // StartContainer starts the given container.
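To illustrate the interface contract documented above — the int64 is a restore duration in microseconds, reported only when restoreOptions.PrintStats asks for it — here is a minimal, self-contained sketch of a stub implementation. fakeRuntime, checkpointOptions, and create() are invented stand-ins for this example, not libpod types.

package main

import (
	"fmt"
	"time"
)

// checkpointOptions mirrors only the PrintStats field of
// ContainerCheckpointOptions that matters for the contract.
type checkpointOptions struct {
	PrintStats bool
}

type fakeRuntime struct{}

// create stands in for the real work of invoking the OCI runtime.
func (r *fakeRuntime) create() error {
	time.Sleep(10 * time.Millisecond)
	return nil
}

// CreateContainer follows the documented contract: the int64 is the restore
// duration in microseconds when restoreOptions is non-nil and PrintStats is
// true; in all other cases it is 0.
func (r *fakeRuntime) CreateContainer(restoreOptions *checkpointOptions) (int64, error) {
	var started time.Time
	if restoreOptions != nil {
		started = time.Now()
	}
	if err := r.create(); err != nil {
		return 0, err
	}
	if restoreOptions != nil && restoreOptions.PrintStats {
		return time.Since(started).Microseconds(), nil
	}
	return 0, nil
}

func main() {
	r := &fakeRuntime{}
	duration, err := r.CreateContainer(&checkpointOptions{PrintStats: true})
	fmt.Println(duration, err)
}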
@@ -183,35 +183,39 @@ func hasCurrentUserMapped(ctr *Container) bool {
 }

 // CreateContainer creates a container.
-func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
+func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) {
     // always make the run dir accessible to the current user so that the PID files can be read without
     // being in the rootless user namespace.
     if err := makeAccessible(ctr.state.RunDir, 0, 0); err != nil {
-        return err
+        return 0, err
     }
     if !hasCurrentUserMapped(ctr) {
         for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} {
             if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
-                return err
+                return 0, err
             }
         }

         // if we are running a non privileged container, be sure to umount some kernel paths so they are not
         // bind mounted inside the container at all.
         if !ctr.config.Privileged && !rootless.IsRootless() {
-            ch := make(chan error)
+            type result struct {
+                restoreDuration int64
+                err             error
+            }
+            ch := make(chan result)
             go func() {
                 runtime.LockOSThread()
-                err := func() error {
+                restoreDuration, err := func() (int64, error) {
                     fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
                     if err != nil {
-                        return err
+                        return 0, err
                     }
                     defer errorhandling.CloseQuiet(fd)

                     // create a new mountns on the current thread
                     if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
-                        return err
+                        return 0, err
                     }
                     defer func() {
                         if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
@@ -224,12 +228,12 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
                     // changes are propagated to the host.
                     err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
                     if err != nil {
-                        return errors.Wrapf(err, "cannot make /sys slave")
+                        return 0, errors.Wrapf(err, "cannot make /sys slave")
                     }

                     mounts, err := pmount.GetMounts()
                     if err != nil {
-                        return err
+                        return 0, err
                     }
                     for _, m := range mounts {
                         if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") {
@@ -237,15 +241,18 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
                         }
                         err = unix.Unmount(m.Mountpoint, 0)
                         if err != nil && !os.IsNotExist(err) {
-                            return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
+                            return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
                         }
                     }
                     return r.createOCIContainer(ctr, restoreOptions)
                 }()
-                ch <- err
+                ch <- result{
+                    restoreDuration: restoreDuration,
+                    err:             err,
+                }
             }()
-            err := <-ch
-            return err
+            r := <-ch
+            return r.restoreDuration, r.err
         }
     }
     return r.createOCIContainer(ctr, restoreOptions)
@@ -995,23 +1002,23 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
 }

 // createOCIContainer generates this container's main conmon instance and prepares it for starting
-func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
+func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) {
     var stderrBuf bytes.Buffer

     runtimeDir, err := util.GetRuntimeDir()
     if err != nil {
-        return err
+        return 0, err
     }

     parentSyncPipe, childSyncPipe, err := newPipe()
     if err != nil {
-        return errors.Wrapf(err, "error creating socket pair")
+        return 0, errors.Wrapf(err, "error creating socket pair")
     }
     defer errorhandling.CloseQuiet(parentSyncPipe)

     childStartPipe, parentStartPipe, err := newPipe()
     if err != nil {
-        return errors.Wrapf(err, "error creating socket pair for start pipe")
+        return 0, errors.Wrapf(err, "error creating socket pair for start pipe")
     }

     defer errorhandling.CloseQuiet(parentStartPipe)
@@ -1023,12 +1030,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co

     logTag, err := r.getLogTag(ctr)
     if err != nil {
-        return err
+        return 0, err
     }

     if ctr.config.CgroupsMode == cgroupSplit {
         if err := utils.MoveUnderCgroupSubtree("runtime"); err != nil {
-            return err
+            return 0, err
         }
     }

@@ -1079,7 +1086,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
     } else {
         fds, err := strconv.Atoi(val)
         if err != nil {
-            return fmt.Errorf("converting LISTEN_FDS=%s: %w", val, err)
+            return 0, fmt.Errorf("converting LISTEN_FDS=%s: %w", val, err)
         }
         preserveFDs = uint(fds)
     }
@@ -1160,7 +1167,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
     if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
         ports, err := bindPorts(ctr.config.PortMappings)
         if err != nil {
-            return err
+            return 0, err
         }
         filesToClose = append(filesToClose, ports...)

@@ -1176,12 +1183,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
         if havePortMapping {
             ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
             if err != nil {
-                return errors.Wrapf(err, "failed to create rootless port sync pipe")
+                return 0, errors.Wrapf(err, "failed to create rootless port sync pipe")
             }
         }
         ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
         if err != nil {
-            return errors.Wrapf(err, "failed to create rootless network sync pipe")
+            return 0, errors.Wrapf(err, "failed to create rootless network sync pipe")
         }
     } else {
         if ctr.rootlessSlirpSyncR != nil {
@@ -1200,22 +1207,25 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
             cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncW)
         }
     }

+    var runtimeRestoreStarted time.Time
+    if restoreOptions != nil {
+        runtimeRestoreStarted = time.Now()
+    }
     err = startCommandGivenSelinux(cmd, ctr)

     // regardless of whether we errored or not, we no longer need the children pipes
     childSyncPipe.Close()
     childStartPipe.Close()
     if err != nil {
-        return err
+        return 0, err
     }
     if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil {
-        return err
+        return 0, err
     }
     /* Wait for initial setup and fork, and reap child */
     err = cmd.Wait()
     if err != nil {
-        return err
+        return 0, err
     }

     pid, err := readConmonPipeData(parentSyncPipe, ociLog)
@@ -1223,7 +1233,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
         if err2 := r.DeleteContainer(ctr); err2 != nil {
             logrus.Errorf("Removing container %s from runtime after creation failed", ctr.ID())
         }
-        return err
+        return 0, err
     }
     ctr.state.PID = pid

@@ -1249,13 +1259,20 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
         }
     }

+    runtimeRestoreDuration := func() int64 {
+        if restoreOptions != nil && restoreOptions.PrintStats {
+            return time.Since(runtimeRestoreStarted).Microseconds()
+        }
+        return 0
+    }()
+
     // These fds were passed down to the runtime. Close them
     // and not interfere
     for _, f := range filesToClose {
         errorhandling.CloseQuiet(f)
     }

-    return nil
+    return runtimeRestoreDuration, nil
 }

 // configureConmonEnv gets the environment values to add to conmon's exec struct
@@ -66,8 +66,8 @@ func (r *MissingRuntime) Path() string {
 }

 // CreateContainer is not available as the runtime is missing
-func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
-    return r.printError()
+func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) {
+    return 0, r.printError()
 }

 // UpdateContainerStatus is not available as the runtime is missing
@@ -294,6 +294,7 @@ func Restore(w http.ResponseWriter, r *http.Request) {
         IgnoreVolumes   bool `schema:"ignoreVolumes"`
         IgnoreStaticIP  bool `schema:"ignoreStaticIP"`
         IgnoreStaticMAC bool `schema:"ignoreStaticMAC"`
+        PrintStats      bool `schema:"printStats"`
     }{
         // override any golang type defaults
     }
@@ -329,17 +330,26 @@ func Restore(w http.ResponseWriter, r *http.Request) {
         IgnoreRootfs:    query.IgnoreRootFS,
         IgnoreStaticIP:  query.IgnoreStaticIP,
         IgnoreStaticMAC: query.IgnoreStaticMAC,
+        PrintStats:      query.PrintStats,
     }
     if query.Import {
         options.TargetFile = targetFile
         options.Name = query.Name
     }
-    err = ctr.Restore(r.Context(), options)
+    criuStatistics, runtimeRestoreDuration, err := ctr.Restore(r.Context(), options)
     if err != nil {
         utils.InternalServerError(w, err)
         return
     }
-    utils.WriteResponse(w, http.StatusOK, entities.RestoreReport{Id: ctr.ID()})
+    utils.WriteResponse(
+        w,
+        http.StatusOK,
+        entities.RestoreReport{
+            Id:              ctr.ID(),
+            RuntimeDuration: runtimeRestoreDuration,
+            CRIUStatistics:  criuStatistics,
+        },
+    )
 }

 func InitContainer(w http.ResponseWriter, r *http.Request) {
@@ -1499,6 +1499,10 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
     //   name: ignoreStaticMAC
     //   type: boolean
     //   description: ignore MAC address if set statically
+    // - in: query
+    //   name: printStats
+    //   type: boolean
+    //   description: add restore statistics to the returned RestoreReport
     // produces:
     // - application/json
     // responses:
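The same statistics can also be requested over the REST API by adding the new printStats query parameter; the returned RestoreReport then carries runtime_restore_duration and criu_statistics. The sketch below is an assumption-laden example client: it presumes the rootful API socket at /run/podman/podman.sock, a container named 'mycontainer', and a versioned libpod restore route of the form /v3.0.0/libpod/containers/{name}/restore — verify both against the routes actually registered here before relying on it.

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Assumed socket path and route; adjust to the local Podman service.
	const socket = "/run/podman/podman.sock"
	const url = "http://d/v3.0.0/libpod/containers/mycontainer/restore?printStats=true"

	client := &http.Client{
		Transport: &http.Transport{
			// Route every request through the Podman API unix socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socket)
			},
		},
	}

	resp, err := client.Post(url, "application/json", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	// The body is a RestoreReport; with printStats=true it includes
	// runtime_restore_duration and criu_statistics.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}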
@@ -214,11 +214,14 @@ type RestoreOptions struct {
     ImportPrevious  string
     PublishPorts    []nettypes.PortMapping
     Pod             string
+    PrintStats      bool
 }

 type RestoreReport struct {
-    Err error
-    Id  string //nolint
+    Err             error                                   `json:"-"`
+    Id              string                                  `json:"Id"` //nolint
+    RuntimeDuration int64                                   `json:"runtime_restore_duration"`
+    CRIUStatistics  *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"`
 }

 type ContainerCreateReport struct {
@@ -560,6 +560,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
         IgnoreStaticMAC: options.IgnoreStaticMAC,
         ImportPrevious:  options.ImportPrevious,
         Pod:             options.Pod,
+        PrintStats:      options.PrintStats,
     }

     filterFuncs := []libpod.ContainerFilter{
@@ -582,10 +583,12 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
     }
     reports := make([]*entities.RestoreReport, 0, len(cons))
     for _, con := range cons {
-        err := con.Restore(ctx, restoreOptions)
+        criuStatistics, runtimeRestoreDuration, err := con.Restore(ctx, restoreOptions)
         reports = append(reports, &entities.RestoreReport{
             Err:             err,
             Id:              con.ID(),
+            RuntimeDuration: runtimeRestoreDuration,
+            CRIUStatistics:  criuStatistics,
         })
     }
     return reports, nil