Merge pull request #9521 from adrianreber/2021-02-25-checkpointctl

Reorder checkpoint/restore code for CRI-O
Authored by OpenShift Merge Robot, committed by GitHub on 2021-03-03 02:06:19 -08:00
10 changed files with 684 additions and 222 deletions

go.mod (1 changed line)

@ -6,6 +6,7 @@ require (
github.com/BurntSushi/toml v0.3.1
github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.8.1

go.sum (10 changed lines)

@ -57,6 +57,8 @@ github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrS
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 h1:ZmSAEFFtv3mepC4/Ze6E/hi6vGZlhRvywqp1l+w+qqw=
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7/go.mod h1:Kp3ezoDVdhfYxZUtgs4OL8sVvgOLz3txk0sbQD0opvw=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
@ -112,6 +114,8 @@ github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2D
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw=
github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.25.0 h1:p0PLlQcWmtE+7XLfOCR0WuYyMTby1yozpI4DaKOtWTA=
@ -333,6 +337,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@ -363,6 +369,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@ -423,6 +430,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -466,6 +474,7 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df/go.m
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA=
@ -594,6 +603,7 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=


@ -13,6 +13,7 @@ import (
"strings"
"time"
metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/buildah/copier"
"github.com/containers/common/pkg/secrets"
"github.com/containers/podman/v3/libpod/define"
@ -135,7 +136,7 @@ func (c *Container) ControlSocketPath() string {
// CheckpointPath returns the path to the directory containing the checkpoint
func (c *Container) CheckpointPath() string {
return filepath.Join(c.bundlePath(), "checkpoint")
return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory)
}
// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
@ -2141,26 +2142,11 @@ func (c *Container) canWithPrevious() error {
return err
}
// writeJSONFile marshalls and writes the given data to a JSON file
// in the bundle path
func (c *Container) writeJSONFile(v interface{}, file string) error {
fileJSON, err := json.MarshalIndent(v, "", " ")
if err != nil {
return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
}
file = filepath.Join(c.bundlePath(), file)
if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
return err
}
return nil
}
// prepareCheckpointExport writes the config and spec to
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
@ -2171,7 +2157,7 @@ func (c *Container) prepareCheckpointExport() error {
logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
return err
}
if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil {
return err
}


@ -19,6 +19,7 @@ import (
"syscall"
"time"
metadata "github.com/checkpoint-restore/checkpointctl/lib"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/chrootuser"
@ -33,6 +34,7 @@ import (
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/annotations"
"github.com/containers/podman/v3/pkg/cgroups"
"github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/criu"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/resolvconf"
@ -884,80 +886,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
includeFiles := []string{
"checkpoint",
"artifacts",
"ctr.log",
"config.dump",
"spec.dump",
"network.status"}
metadata.CheckpointDirectory,
metadata.ConfigDumpFile,
metadata.SpecDumpFile,
metadata.NetworkStatusFile,
}
if options.PreCheckPoint {
includeFiles[0] = "pre-checkpoint"
}
// Get root file-system changes included in the checkpoint archive
rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files")
var addToTarFiles []string
if !options.IgnoreRootfs {
// To correctly track deleted files, let's go through the output of 'podman diff'
tarFiles, err := c.runtime.GetDiff("", c.ID())
rootFsChanges, err := c.runtime.GetDiff("", c.ID())
if err != nil {
return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
}
var rootfsIncludeFiles []string
var deletedFiles []string
for _, file := range tarFiles {
if file.Kind == archive.ChangeAdd {
rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
continue
}
if file.Kind == archive.ChangeDelete {
deletedFiles = append(deletedFiles, file.Path)
continue
}
fileName, err := os.Stat(file.Path)
if err != nil {
continue
}
if !fileName.IsDir() && file.Kind == archive.ChangeModify {
rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
continue
}
return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
}
if len(rootfsIncludeFiles) > 0 {
rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeSourceDir: true,
IncludeFiles: rootfsIncludeFiles,
})
if err != nil {
return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
}
rootfsDiffFile, err := os.Create(rootfsDiffPath)
if err != nil {
return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
}
defer rootfsDiffFile.Close()
_, err = io.Copy(rootfsDiffFile, rootfsTar)
if err != nil {
return err
}
includeFiles = append(includeFiles, "rootfs-diff.tar")
addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
if err != nil {
return err
}
if len(deletedFiles) > 0 {
formatJSON, err := json.MarshalIndent(deletedFiles, "", " ")
if err != nil {
return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
}
if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil {
return errors.Wrap(err, "error creating delete files list file")
}
includeFiles = append(includeFiles, "deleted.files")
}
includeFiles = append(includeFiles, addToTarFiles...)
}
// Folder containing archived volumes that will be included in the export
@ -1034,8 +988,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
os.Remove(rootfsDiffPath)
os.Remove(deleteFilesList)
for _, file := range addToTarFiles {
os.Remove(filepath.Join(c.bundlePath(), file))
}
if !options.IgnoreVolumes {
os.RemoveAll(expVolDir)
@ -1054,23 +1009,6 @@ func (c *Container) checkpointRestoreSupported() error {
return nil
}
func (c *Container) checkpointRestoreLabelLog(fileName string) error {
// Create the CRIU log file and label it
dumpLog := filepath.Join(c.bundlePath(), fileName)
logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
if err != nil {
return errors.Wrap(err, "failed to create CRIU log file")
}
if err := logFile.Close(); err != nil {
logrus.Error(err)
}
if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
return err
}
return nil
}
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
if err := c.checkpointRestoreSupported(); err != nil {
return err
@ -1084,7 +1022,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil {
return err
}
@ -1095,11 +1033,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
// Save network.status. This is needed to restore the container with
// the same IP. Currently limited to one IP address in a container
// with one interface.
formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ")
if err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil {
if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil {
return err
}
@ -1115,7 +1049,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if options.TargetFile != "" {
if err = c.exportCheckpoint(options); err != nil {
if err := c.exportCheckpoint(options); err != nil {
return err
}
}
@ -1135,8 +1069,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
cleanup := []string{
"dump.log",
"stats-dump",
"config.dump",
"spec.dump",
metadata.ConfigDumpFile,
metadata.SpecDumpFile,
}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
@ -1151,28 +1085,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
func (c *Container) importCheckpoint(input string) error {
archiveFile, err := os.Open(input)
if err != nil {
return errors.Wrap(err, "failed to open checkpoint archive for import")
}
defer archiveFile.Close()
options := &archive.TarOptions{
ExcludePatterns: []string{
// config.dump and spec.dump are only required
// container creation
"config.dump",
"spec.dump",
},
}
err = archive.Untar(archiveFile, c.bundlePath(), options)
if err != nil {
return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil {
return err
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
if err = c.saveSpec(g.Config); err != nil {
if err := c.saveSpec(g.Config); err != nil {
return errors.Wrap(err, "saving imported container specification for restore failed")
}
@ -1221,7 +1140,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
return err
}
@ -1244,7 +1163,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath())
// If the restored container should get a new name, the IP address of
// the container will not be restored. This assumes that if a new name is
// specified, the container is restored multiple times.
@ -1254,43 +1173,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
// container with the same IP address / MAC address as during checkpointing.
defer networkStatusFile.Close()
var networkStatus []*cnitypes.Result
networkJSON, err := ioutil.ReadAll(networkStatusFile)
if err != nil {
return err
}
if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
return err
}
if !options.IgnoreStaticIP {
// Take the first IP address
var IP net.IP
if len(networkStatus) > 0 {
if len(networkStatus[0].IPs) > 0 {
IP = networkStatus[0].IPs[0].Address.IP
}
}
if IP != nil {
if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil {
// Tell CNI which IP address we want.
c.requestedIP = IP
}
}
if !options.IgnoreStaticMAC {
// Take the first device with a defined sandbox.
var MAC net.HardwareAddr
if len(networkStatus) > 0 {
for _, n := range networkStatus[0].Interfaces {
if n.Sandbox != "" {
MAC, err = net.ParseMAC(n.Mac)
if err != nil {
return err
}
break
}
}
}
if MAC != nil {
if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil {
// Tell CNI which MAC address we want.
c.requestedMAC = MAC
}
@ -1398,36 +1288,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Before actually restarting the container, apply the root file-system changes
if !options.IgnoreRootfs {
rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
if _, err := os.Stat(rootfsDiffPath); err == nil {
// Only do this if a rootfs-diff.tar actually exists
rootfsDiffFile, err := os.Open(rootfsDiffPath)
if err != nil {
return errors.Wrap(err, "failed to open root file-system diff file")
}
defer rootfsDiffFile.Close()
if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil {
return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
}
if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
return err
}
deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files")
if _, err := os.Stat(deletedFilesPath); err == nil {
var deletedFiles []string
deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath)
if err != nil {
return errors.Wrapf(err, "failed to read deleted files file")
}
if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil {
return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath)
}
for _, deleteFile := range deletedFiles {
// Using RemoveAll as deletedFiles, which is generated from 'podman diff'
// lists completely deleted directories as a single entry: 'D /root'.
err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile))
if err != nil {
return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID())
}
}
if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
return err
}
}
@ -1452,7 +1318,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err)
}
cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"}
cleanup := [...]string{
"restore.log",
"dump.log",
"stats-dump",
"stats-restore",
metadata.NetworkStatusFile,
metadata.RootFsDiffTar,
metadata.DeletedFilesFile,
}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)


@ -28,6 +28,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/logs"
"github.com/containers/podman/v3/pkg/cgroups"
"github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/rootless"
@ -837,16 +838,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
// Check if the runtime implements checkpointing. Currently only
// runc's checkpoint/restore implementation is supported.
cmd := exec.Command(r.path, "checkpoint", "--help")
if err := cmd.Start(); err != nil {
return false
}
if err := cmd.Wait(); err == nil {
return true
}
return false
return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
}
// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error


@ -4,15 +4,14 @@ import (
"context"
"io/ioutil"
"os"
"path/filepath"
metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/image"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/util"
"github.com/containers/storage/pkg/archive"
jsoniter "github.com/json-iterator/go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -20,21 +19,6 @@ import (
// Prefixing the checkpoint/restore related functions with 'cr'
// crImportFromJSON imports the JSON files stored in the exported
// checkpoint tarball
func crImportFromJSON(filePath string, v interface{}) error {
content, err := ioutil.ReadFile(filePath)
if err != nil {
return errors.Wrap(err, "failed to read container definition for restore")
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(content, v); err != nil {
return errors.Wrapf(err, "failed to unmarshal container definition %s for restore", filePath)
}
return nil
}
// CRImportCheckpoint is the function which imports the information
// from checkpoint tarball and re-creates the container from that information
func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOptions entities.RestoreOptions) ([]*libpod.Container, error) {
@ -48,13 +32,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
options := &archive.TarOptions{
// Here we only need the files config.dump and spec.dump
ExcludePatterns: []string{
"checkpoint",
"artifacts",
"ctr.log",
"rootfs-diff.tar",
"network.status",
"deleted.files",
"volumes",
"ctr.log",
"artifacts",
metadata.RootFsDiffTar,
metadata.DeletedFilesFile,
metadata.NetworkStatusFile,
metadata.CheckpointDirectory,
},
}
dir, err := ioutil.TempDir("", "checkpoint")
@ -73,13 +57,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// Load spec.dump from temporary directory
dumpSpec := new(spec.Spec)
if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil {
if _, err := metadata.ReadJSONFile(dumpSpec, dir, metadata.SpecDumpFile); err != nil {
return nil, err
}
// Load config.dump from temporary directory
config := new(libpod.ContainerConfig)
if err = crImportFromJSON(filepath.Join(dir, "config.dump"), config); err != nil {
if _, err = metadata.ReadJSONFile(config, dir, metadata.ConfigDumpFile); err != nil {
return nil, err
}


@ -0,0 +1,191 @@
package crutils
import (
"io"
"os"
"os/exec"
"path/filepath"
metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
// This file mainly exists to make the checkpoint/restore functions
// available for other users. One possible candidate would be CRI-O.
// CRImportCheckpointWithoutConfig imports the checkpoint archive (input)
// into the directory destination without "config.dump" and "spec.dump"
func CRImportCheckpointWithoutConfig(destination, input string) error {
archiveFile, err := os.Open(input)
if err != nil {
return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
}
defer archiveFile.Close()
options := &archive.TarOptions{
ExcludePatterns: []string{
// Import everything else besides the container config
metadata.ConfigDumpFile,
metadata.SpecDumpFile,
},
}
if err = archive.Untar(archiveFile, destination, options); err != nil {
return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
}
return nil
}
// CRRemoveDeletedFiles loads the list of deleted files and, if
// it exists, deletes all files listed.
func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) error {
deletedFiles, _, err := metadata.ReadContainerCheckpointDeletedFiles(baseDirectory)
if os.IsNotExist(errors.Unwrap(errors.Unwrap(err))) {
// No files to delete. Just return
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to read deleted files file")
}
for _, deleteFile := range deletedFiles {
// Using RemoveAll as deletedFiles, which is generated from 'podman diff'
// lists completely deleted directories as a single entry: 'D /root'.
if err := os.RemoveAll(filepath.Join(containerRootDirectory, deleteFile)); err != nil {
return errors.Wrapf(err, "failed to delete files from container %s during restore", id)
}
}
return nil
}
// CRApplyRootFsDiffTar applies the tar archive found in baseDirectory with the
// root file system changes on top of containerRootDirectory
func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error {
rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar)
if _, err := os.Stat(rootfsDiffPath); err != nil {
// Only do this if a rootfs-diff.tar actually exists
return nil
}
rootfsDiffFile, err := os.Open(rootfsDiffPath)
if err != nil {
return errors.Wrap(err, "failed to open root file-system diff file")
}
defer rootfsDiffFile.Close()
if err := archive.Untar(rootfsDiffFile, containerRootDirectory, nil); err != nil {
return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
}
return nil
}
// CRCreateRootFsDiffTar goes through the 'changes' and can create two files:
// * metadata.RootFsDiffTar will contain all new and changed files
// * metadata.DeletedFilesFile will contain a list of deleted files
// With these two files it is possible to restore the container file system to the same
// state it was in during checkpointing.
// Changes to directories (owner, mode) are not handled.
func CRCreateRootFsDiffTar(changes *[]archive.Change, mountPoint, destination string) (includeFiles []string, err error) {
if len(*changes) == 0 {
return includeFiles, nil
}
var rootfsIncludeFiles []string
var deletedFiles []string
rootfsDiffPath := filepath.Join(destination, metadata.RootFsDiffTar)
for _, file := range *changes {
if file.Kind == archive.ChangeAdd {
rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
continue
}
if file.Kind == archive.ChangeDelete {
deletedFiles = append(deletedFiles, file.Path)
continue
}
fileName, err := os.Stat(file.Path)
if err != nil {
continue
}
if !fileName.IsDir() && file.Kind == archive.ChangeModify {
rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
continue
}
}
if len(rootfsIncludeFiles) > 0 {
rootfsTar, err := archive.TarWithOptions(mountPoint, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeSourceDir: true,
IncludeFiles: rootfsIncludeFiles,
})
if err != nil {
return includeFiles, errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
}
rootfsDiffFile, err := os.Create(rootfsDiffPath)
if err != nil {
return includeFiles, errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
}
defer rootfsDiffFile.Close()
if _, err = io.Copy(rootfsDiffFile, rootfsTar); err != nil {
return includeFiles, err
}
includeFiles = append(includeFiles, metadata.RootFsDiffTar)
}
if len(deletedFiles) == 0 {
return includeFiles, nil
}
if _, err := metadata.WriteJSONFile(deletedFiles, destination, metadata.DeletedFilesFile); err != nil {
return includeFiles, err
}
includeFiles = append(includeFiles, metadata.DeletedFilesFile)
return includeFiles, nil
}
// CRCreateFileWithLabel creates an empty file and sets the corresponding ('fileLabel')
// SELinux label on the file.
// This is necessary for CRIU log files because CRIU infects the processes in
// the container with a 'parasite' and this will also try to write to the log files
// from the context of the container processes.
func CRCreateFileWithLabel(directory, fileName, fileLabel string) error {
logFileName := filepath.Join(directory, fileName)
logFile, err := os.OpenFile(logFileName, os.O_CREATE, 0o600)
if err != nil {
return errors.Wrapf(err, "failed to create file %q", logFileName)
}
defer logFile.Close()
if err = label.SetFileLabel(logFileName, fileLabel); err != nil {
return errors.Wrapf(err, "failed to label file %q", logFileName)
}
return nil
}
// CRRuntimeSupportsCheckpointRestore tests if the given runtime at 'runtimePath'
// supports checkpointing. The checkpoint restore interface has no definition
// but crun implements all commands just as runc does. What runc does is the
// official definition of the checkpoint/restore interface.
func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool {
// Check if the runtime implements checkpointing. Currently only
// runc's and crun's checkpoint/restore implementation is supported.
cmd := exec.Command(runtimePath, "checkpoint", "--help")
if err := cmd.Start(); err != nil {
return false
}
if err := cmd.Wait(); err == nil {
return true
}
return false
}
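
The helpers above consolidate logic that was previously inlined in libpod so that other consumers (CRI-O is named as a possible candidate) can reuse it. As a rough illustration only, a restore-side caller outside Podman might wire them together as sketched below; the runtime path, bundle directory, archive path, container ID, and SELinux label are placeholder assumptions, not values taken from this change.

package main

import (
    "log"

    "github.com/containers/podman/v3/pkg/checkpoint/crutils"
)

func main() {
    const (
        runtimePath = "/usr/bin/runc"          // assumed runtime binary
        bundlePath  = "/tmp/restore-bundle"    // assumed bundle directory
        rootfs      = "/tmp/restore-rootfs"    // assumed container root file system
        archivePath = "/tmp/checkpoint.tar.gz" // assumed checkpoint archive
        ctrID       = "example-container"      // placeholder container ID
    )

    // Bail out early if the runtime cannot checkpoint/restore at all.
    if !crutils.CRRuntimeSupportsCheckpointRestore(runtimePath) {
        log.Fatalf("%s does not support checkpoint/restore", runtimePath)
    }

    // Unpack everything except config.dump/spec.dump into the bundle.
    if err := crutils.CRImportCheckpointWithoutConfig(bundlePath, archivePath); err != nil {
        log.Fatal(err)
    }

    // Pre-create and label the CRIU restore log so CRIU's in-container
    // parasite code is allowed to write to it (placeholder label).
    if err := crutils.CRCreateFileWithLabel(bundlePath, "restore.log", "system_u:object_r:container_file_t:s0:c1,c2"); err != nil {
        log.Fatal(err)
    }

    // Replay the root file-system changes captured at checkpoint time.
    if err := crutils.CRApplyRootFsDiffTar(bundlePath, rootfs); err != nil {
        log.Fatal(err)
    }
    if err := crutils.CRRemoveDeletedFiles(ctrID, bundlePath, rootfs); err != nil {
        log.Fatal(err)
    }
}

The ordering mirrors what Container.restore() now does in this PR: check runtime support, unpack the archive without the container configuration, pre-create the labeled restore.log, then apply rootfs-diff.tar and the deleted.files handling.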


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,221 @@
// SPDX-License-Identifier: Apache-2.0
package metadata
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"time"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
type CheckpointedPod struct {
PodUID string `json:"io.kubernetes.pod.uid,omitempty"`
ID string `json:"SandboxID,omitempty"`
Name string `json:"io.kubernetes.pod.name,omitempty"`
TerminationGracePeriod int64 `json:"io.kubernetes.pod.terminationGracePeriod,omitempty"`
Namespace string `json:"io.kubernetes.pod.namespace,omitempty"`
ConfigSource string `json:"kubernetes.io/config.source,omitempty"`
ConfigSeen string `json:"kubernetes.io/config.seen,omitempty"`
Manager string `json:"io.container.manager,omitempty"`
Containers []CheckpointedContainer `json:"Containers"`
HostIP string `json:"hostIP,omitempty"`
PodIP string `json:"podIP,omitempty"`
PodIPs []string `json:"podIPs,omitempty"`
}
type CheckpointedContainer struct {
Name string `json:"io.kubernetes.container.name,omitempty"`
ID string `json:"id,omitempty"`
TerminationMessagePath string `json:"io.kubernetes.container.terminationMessagePath,omitempty"`
TerminationMessagePolicy string `json:"io.kubernetes.container.terminationMessagePolicy,omitempty"`
RestartCounter int32 `json:"io.kubernetes.container.restartCount,omitempty"`
TerminationMessagePathUID string `json:"terminationMessagePathUID,omitempty"`
Image string `json:"Image"`
}
type CheckpointMetadata struct {
Version int `json:"version"`
CheckpointedPods []CheckpointedPod
}
const (
// kubelet archive
CheckpointedPodsFile = "checkpointed.pods"
// container archive
ConfigDumpFile = "config.dump"
SpecDumpFile = "spec.dump"
NetworkStatusFile = "network.status"
CheckpointDirectory = "checkpoint"
RootFsDiffTar = "rootfs-diff.tar"
DeletedFilesFile = "deleted.files"
// pod archive
PodOptionsFile = "pod.options"
PodDumpFile = "pod.dump"
)
type CheckpointType int
const (
// The checkpoint archive contains a kubelet checkpoint
// One or multiple pods and kubelet metadata (checkpointed.pods)
Kubelet CheckpointType = iota
// The checkpoint archive contains one pod including one or multiple containers
Pod
// The checkpoint archive contains a single container
Container
Unknown
)
// This is a reduced copy of what Podman uses to store checkpoint metadata
type ContainerConfig struct {
ID string `json:"id"`
Name string `json:"name"`
RootfsImageName string `json:"rootfsImageName,omitempty"`
OCIRuntime string `json:"runtime,omitempty"`
CreatedTime time.Time `json:"createdTime"`
}
// This is metadata stored inside of a Pod checkpoint archive
type CheckpointedPodOptions struct {
Version int `json:"version"`
Containers []string `json:"containers,omitempty"`
MountLabel string `json:"mountLabel"`
ProcessLabel string `json:"processLabel"`
}
func DetectCheckpointArchiveType(checkpointDirectory string) (CheckpointType, error) {
_, err := os.Stat(filepath.Join(checkpointDirectory, CheckpointedPodsFile))
if err != nil && !os.IsNotExist(err) {
return Unknown, errors.Wrapf(err, "Failed to access %q\n", CheckpointedPodsFile)
}
if os.IsNotExist(err) {
return Container, nil
}
return Kubelet, nil
}
func ReadContainerCheckpointSpecDump(checkpointDirectory string) (*spec.Spec, string, error) {
var specDump spec.Spec
specDumpFile, err := ReadJSONFile(&specDump, checkpointDirectory, SpecDumpFile)
return &specDump, specDumpFile, err
}
func ReadContainerCheckpointConfigDump(checkpointDirectory string) (*ContainerConfig, string, error) {
var containerConfig ContainerConfig
configDumpFile, err := ReadJSONFile(&containerConfig, checkpointDirectory, ConfigDumpFile)
return &containerConfig, configDumpFile, err
}
func ReadContainerCheckpointDeletedFiles(checkpointDirectory string) ([]string, string, error) {
var deletedFiles []string
deletedFilesFile, err := ReadJSONFile(&deletedFiles, checkpointDirectory, DeletedFilesFile)
return deletedFiles, deletedFilesFile, err
}
func ReadContainerCheckpointNetworkStatus(checkpointDirectory string) ([]*cnitypes.Result, string, error) {
var networkStatus []*cnitypes.Result
networkStatusFile, err := ReadJSONFile(&networkStatus, checkpointDirectory, NetworkStatusFile)
return networkStatus, networkStatusFile, err
}
func ReadKubeletCheckpoints(checkpointsDirectory string) (*CheckpointMetadata, string, error) {
var checkpointMetadata CheckpointMetadata
checkpointMetadataPath, err := ReadJSONFile(&checkpointMetadata, checkpointsDirectory, CheckpointedPodsFile)
return &checkpointMetadata, checkpointMetadataPath, err
}
func GetIPFromNetworkStatus(networkStatus []*cnitypes.Result) net.IP {
if len(networkStatus) == 0 {
return nil
}
// Take the first IP address
if len(networkStatus[0].IPs) == 0 {
return nil
}
IP := networkStatus[0].IPs[0].Address.IP
return IP
}
func GetMACFromNetworkStatus(networkStatus []*cnitypes.Result) net.HardwareAddr {
if len(networkStatus) == 0 {
return nil
}
// Take the first device with a defined sandbox
if len(networkStatus[0].Interfaces) == 0 {
return nil
}
var MAC net.HardwareAddr
MAC = nil
for _, n := range networkStatus[0].Interfaces {
if n.Sandbox != "" {
MAC, _ = net.ParseMAC(n.Mac)
break
}
}
return MAC
}
// WriteJSONFile marshalls and writes the given data to a JSON file
func WriteJSONFile(v interface{}, dir, file string) (string, error) {
fileJSON, err := json.MarshalIndent(v, "", " ")
if err != nil {
return "", errors.Wrapf(err, "Error marshalling JSON")
}
file = filepath.Join(dir, file)
if err := ioutil.WriteFile(file, fileJSON, 0o600); err != nil {
return "", errors.Wrapf(err, "Error writing to %q", file)
}
return file, nil
}
func ReadJSONFile(v interface{}, dir, file string) (string, error) {
file = filepath.Join(dir, file)
content, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read %s", file)
}
if err = json.Unmarshal(content, v); err != nil {
return "", errors.Wrapf(err, "failed to unmarshal %s", file)
}
return file, nil
}
func WriteKubeletCheckpointsMetadata(checkpointMetadata *CheckpointMetadata, dir string) error {
_, err := WriteJSONFile(checkpointMetadata, dir, CheckpointedPodsFile)
return err
}
func ByteToString(b int64) string {
const unit = 1024
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %ciB",
float64(b)/float64(div), "KMGTPE"[exp])
}
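
Podman now calls these vendored helpers instead of its own writeJSONFile/crImportFromJSON wrappers, so the same API is available to any importer of checkpointctl/lib. A minimal round-trip sketch, assuming a pre-existing scratch directory and placeholder config values (none of these paths or IDs come from the PR):

package main

import (
    "fmt"
    "log"
    "time"

    metadata "github.com/checkpoint-restore/checkpointctl/lib"
)

func main() {
    dir := "/tmp/checkpoint-bundle" // assumed to exist already

    cfg := metadata.ContainerConfig{
        ID:              "0123456789ab", // placeholder container ID
        Name:            "example",
        RootfsImageName: "docker.io/library/alpine:latest",
        CreatedTime:     time.Now(),
    }

    // WriteJSONFile returns the full path of the file it created,
    // e.g. /tmp/checkpoint-bundle/config.dump.
    path, err := metadata.WriteJSONFile(cfg, dir, metadata.ConfigDumpFile)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("wrote", path)

    // ReadContainerCheckpointConfigDump is a typed wrapper around ReadJSONFile.
    restored, _, err := metadata.ReadContainerCheckpointConfigDump(dir)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("restored container name:", restored.Name)
    fmt.Println("human-readable size:", metadata.ByteToString(2*1024*1024))
}

Both WriteJSONFile and ReadJSONFile return the full path of the file they touched, which is why the call sites in this PR discard the returned path and keep only the error.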

vendor/modules.txt (vendored, 2 changed lines)

@ -40,6 +40,8 @@ github.com/beorn7/perks/quantile
github.com/blang/semver
# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/buger/goterm
# github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
github.com/checkpoint-restore/checkpointctl/lib
# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/checkpoint-restore/go-criu
github.com/checkpoint-restore/go-criu/rpc