Merge pull request #16371 from alexlarsson/transient-store

Support transient store mode
OpenShift Merge Robot authored this commit on 2022-12-06 09:39:19 -05:00; committed by GitHub.
20 changed files with 291 additions and 45 deletions

View File

@ -464,6 +464,8 @@ func rootFlags(cmd *cobra.Command, podmanConfig *entities.PodmanConfig) {
pFlags.StringVar(&podmanConfig.Runroot, runrootFlagName, "", "Path to the 'run directory' where all state information is stored")
_ = cmd.RegisterFlagCompletionFunc(runrootFlagName, completion.AutocompleteDefault)
pFlags.BoolVar(&podmanConfig.TransientStore, "transient-store", false, "Enable transient container storage")
runtimeFlagName := "runtime"
pFlags.StringVar(&podmanConfig.RuntimePath, runtimeFlagName, podmanConfig.ContainersConfDefaultsRO.Engine.OCIRuntime, "Path to the OCI-compatible binary used to run containers.")
_ = cmd.RegisterFlagCompletionFunc(runtimeFlagName, completion.AutocompleteDefault)

View File

@ -47,6 +47,7 @@ func init() {
flags := pruneCommand.Flags()
flags.BoolVarP(&force, "force", "f", false, "Do not prompt for confirmation. The default is false")
flags.BoolVarP(&pruneOptions.All, "all", "a", false, "Remove all unused data")
flags.BoolVar(&pruneOptions.External, "external", false, "Remove container data in storage not controlled by podman")
flags.BoolVar(&pruneOptions.Volume, "volumes", false, "Prune volumes")
filterFlagName := "filter"
flags.StringArrayVar(&filters, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')")
@ -55,8 +56,8 @@ func init() {
func prune(cmd *cobra.Command, args []string) error {
var err error
// Prompt for confirmation if --force is not set
if !force {
// Prompt for confirmation if --force is not set, unless --external
if !force && !pruneOptions.External {
reader := bufio.NewReader(os.Stdin)
volumeString := ""
if pruneOptions.Volume {
@ -113,7 +114,9 @@ func prune(cmd *cobra.Command, args []string) error {
return err
}
fmt.Printf("Total reclaimed space: %s\n", units.HumanSize((float64)(response.ReclaimedSpace)))
if !pruneOptions.External {
fmt.Printf("Total reclaimed space: %s\n", units.HumanSize((float64)(response.ReclaimedSpace)))
}
return nil
}

View File

@ -146,6 +146,7 @@ store:
imageStore:
number: 5
runRoot: /run/user/3267/containers
transientStore: false
volumePath: /home/dwalsh/.local/share/containers/storage/volumes
version:
APIVersion: 4.0.0
@ -263,7 +264,8 @@ $ podman info --format json
"number": 5
},
"runRoot": "/run/user/3267/containers",
"volumePath": "/home/dwalsh/.local/share/containers/storage/volumes"
"volumePath": "/home/dwalsh/.local/share/containers/storage/volumes",
"transientStore": false
},
"registries": {
"search": [

View File

@ -18,6 +18,15 @@ By default, volumes are not removed to prevent important data from being deleted
Recursively remove all unused pods, containers, images, networks, and volume data. (Maximum 50 iterations.)
#### **--external**
Removes all leftover container storage files from local storage that are not managed by podman. In normal circumstances no such data should exist, but in case of an unclean shutdown the podman database may be corrupted and leave such files behind.
However, when using transient storage mode, the podman database does not persist. This means containers will leave their writable layers on disk after a reboot. If you use the transient store,
it is recommended that you run **podman system prune --external** at some point after each boot.
This option is incompatible with **--all** and **--filter** and drops the default behaviour of removing unused resources.
#### **--filter**=*filters*
Provide filter values.

View File

@ -110,14 +110,14 @@ environment variable is set, the **--remote** option defaults to true.
#### **--root**=*value*
Storage root dir in which data, including images, is stored (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users).
Default root dir configured in `/etc/containers/storage.conf`.
Default root dir configured in `containers-storage.conf(5)`.
Overriding this option will cause the *storage-opt* settings in /etc/containers/storage.conf to be ignored. The user must specify additional options via the `--storage-opt` flag.
Overriding this option will cause the *storage-opt* settings in `containers-storage.conf(5)` to be ignored. The user must specify additional options via the `--storage-opt` flag.
#### **--runroot**=*value*
Storage state directory where all state information is stored (default: "/run/containers/storage" for UID 0, "/run/user/$UID/run" for other users).
Default state dir configured in `/etc/containers/storage.conf`.
Default state dir configured in `containers-storage.conf(5)`.
#### **--runtime**=*value*
@ -141,14 +141,14 @@ to use the installed ssh binary and config file declared in containers.conf.
#### **--storage-driver**=*value*
Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
Storage driver. The default storage driver for UID 0 is configured in `containers-storage.conf(5)`, and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
Overriding this option will cause the *storage-opt* settings in /etc/containers/storage.conf to be ignored. The user must
Overriding this option will cause the *storage-opt* settings in `containers-storage.conf(5)` to be ignored. The user must
specify additional options via the `--storage-opt` flag.
#### **--storage-opt**=*value*
Storage driver option, Default storage driver options are configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode). The `STORAGE_OPTS` environment variable overrides the default. The --storage-opt specified options overrides all. If you specify --storage-opt="", no storage options will be used.
Storage driver option. Default storage driver options are configured in `containers-storage.conf(5)`. The `STORAGE_OPTS` environment variable overrides the default. The --storage-opt specified options override all. If you specify --storage-opt="", no storage options will be used.
#### **--syslog**
@ -162,6 +162,14 @@ Path to the tmp directory, for libpod runtime content. Defaults to `$XDG_RUNTIME
NOTE --tmpdir is not used for the temporary storage of downloaded images. Use the environment variable `TMPDIR` to change the temporary storage location of downloaded container images. Podman defaults to use `/var/tmp`.
#### **--transient-store**
Enables a global transient storage mode where all container metadata is stored on non-persistent media (i.e. in the location specified by `--runroot`).
This mode allows starting containers faster, as well as guaranteeing a fresh state on boot in case of unclean shutdowns or other problems. However,
it is not compatible with the traditional model where containers persist across reboots.
Default value for this is configured in `containers-storage.conf(5)`.
#### **--url**=*value*
URL to access Podman service (default from `containers.conf`, rootless `unix://run/user/$UID/podman/podman.sock` or as root `unix://run/podman/podman.sock`).
Setting this option will switch the **--remote** option to true.
@ -426,7 +434,7 @@ Currently slirp4netns or pasta is required to be installed to create a network
device, otherwise rootless containers need to run in the network namespace of
the host.
In certain environments like HPC (High Performance Computing), users cannot take advantage of the additional UIDs and GIDs from the /etc/subuid and /etc/subgid systems. However, in this environment, rootless Podman can operate with a single UID. To make this work, set the `ignore_chown_errors` option in the /etc/containers/storage.conf or in ~/.config/containers/storage.conf files. This option tells Podman when pulling an image to ignore chown errors when attempting to change a file in a container image to match the non-root UID in the image. This means all files get saved as the user's UID. Note this could cause issues when running the container.
In certain environments like HPC (High Performance Computing), users cannot take advantage of the additional UIDs and GIDs from the /etc/subuid and /etc/subgid systems. However, in this environment, rootless Podman can operate with a single UID. To make this work, set the `ignore_chown_errors` option in the `containers-storage.conf(5)` file. This option tells Podman when pulling an image to ignore chown errors when attempting to change a file in a container image to match the non-root UID in the image. This means all files get saved as the user's UID. Note this could cause issues when running the container.
### **NOTE:** Unsupported file systems in rootless mode

View File

@ -118,6 +118,7 @@ type StoreInfo struct {
ImageStore ImageStore `json:"imageStore"`
RunRoot string `json:"runRoot"`
VolumePath string `json:"volumePath"`
TransientStore bool `json:"transientStore"`
}
// ImageStore describes the image store. Right now only the number

View File

@ -226,6 +226,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
GraphOptions: nil,
VolumePath: r.config.Engine.VolumePath,
ConfigFile: configFile,
TransientStore: r.store.TransientStore(),
}
graphOptions := map[string]interface{}{}

View File

@ -109,6 +109,18 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
}
}
func WithTransientStore(transientStore bool) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return define.ErrRuntimeFinalized
}
rt.storageConfig.TransientStore = transientStore
return nil
}
}
// WithSignaturePolicy specifies the path of a file which decides how trust is
// managed for images we've pulled.
// If this is not specified, the system default configuration will be used
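
For orientation, a minimal sketch of how a caller could apply the new option when building a libpod runtime. The helper name is hypothetical; the `NewRuntime` constructor with variadic `RuntimeOption` arguments is assumed from the existing libpod API, and the actual CLI wiring appears later in this PR via `libpod.WithTransientStore(cfg.TransientStore)`.

```go
package main

import (
	"context"

	"github.com/containers/podman/v4/libpod"
)

// newTransientRuntime is a hypothetical helper: it constructs a runtime with
// the transient store enabled. NewRuntime applies each RuntimeOption before
// finalizing the runtime, so storageConfig.TransientStore ends up set to true.
func newTransientRuntime(ctx context.Context) (*libpod.Runtime, error) {
	opts := []libpod.RuntimeOption{
		libpod.WithTransientStore(true),
	}
	return libpod.NewRuntime(ctx, opts...)
}
```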

View File

@ -323,6 +323,11 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}
// Create the TmpDir if needed
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
return fmt.Errorf("creating runtime temporary files directory: %w", err)
}
// Set up the state.
//
// TODO - if we further break out the state implementation into
@ -335,7 +340,11 @@ func makeRuntime(runtime *Runtime) (retErr error) {
case config.SQLiteStateStore:
return fmt.Errorf("SQLite state is currently disabled: %w", define.ErrInvalidArg)
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
baseDir := runtime.config.Engine.StaticDir
if runtime.storageConfig.TransientStore {
baseDir = runtime.config.Engine.TmpDir
}
dbPath := filepath.Join(baseDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
if err != nil {
@ -392,6 +401,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
logrus.Debugf("Using transient store: %v", runtime.storageConfig.TransientStore)
// Validate our config against the database, now that we've set our
// final storage configuration
@ -459,14 +469,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
if !errors.Is(err, os.ErrExist) {
return fmt.Errorf("creating tmpdir: %w", err)
}
}
// Get us at least one working OCI runtime.
runtime.ociRuntimes = make(map[string]OCIRuntime)
@ -517,14 +519,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
if !errors.Is(err, os.ErrExist) {
return fmt.Errorf("creating runtime temporary files directory: %w", err)
}
}
// the store is only set up when we are in the userns so we do the same for the network interface
if !needsUserns {
netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog)
@ -966,6 +960,10 @@ func (r *Runtime) StorageConfig() storage.StoreOptions {
return r.storageConfig
}
func (r *Runtime) GarbageCollect() error {
return r.store.GarbageCollect()
}
// RunRoot retrieves the current c/storage temporary directory in use by Libpod.
func (r *Runtime) RunRoot() string {
if r.store == nil {

View File

@ -19,8 +19,9 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
query := struct {
All bool `schema:"all"`
Volumes bool `schema:"volumes"`
All bool `schema:"all"`
Volumes bool `schema:"volumes"`
External bool `schema:"external"`
}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
@ -38,9 +39,10 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) {
containerEngine := abi.ContainerEngine{Libpod: runtime}
pruneOptions := entities.SystemPruneOptions{
All: query.All,
Volume: query.Volumes,
Filters: *filterMap,
All: query.All,
Volume: query.Volumes,
Filters: *filterMap,
External: query.External,
}
report, err := containerEngine.SystemPrune(r.Context(), pruneOptions)
if err != nil {

View File

@ -14,9 +14,10 @@ type EventsOptions struct {
//
//go:generate go run ../generator/generator.go PruneOptions
type PruneOptions struct {
All *bool
Filters map[string][]string
Volumes *bool
All *bool
Filters map[string][]string
Volumes *bool
External *bool
}
// VersionOptions are optional options for getting version info

View File

@ -61,3 +61,18 @@ func (o *PruneOptions) GetVolumes() bool {
}
return *o.Volumes
}
// WithExternal set field External to given value
func (o *PruneOptions) WithExternal(value bool) *PruneOptions {
o.External = &value
return o
}
// GetExternal returns value of field External
func (o *PruneOptions) GetExternal() bool {
if o.External == nil {
var z bool
return z
}
return *o.External
}
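
As a usage illustration only: the generated setter composes with the existing builder methods the same way the tunnel code further down in this PR does (`WithAll(...).WithVolumes(...).WithFilters(...).WithExternal(...)`). Below is a minimal client-side sketch, assuming a podman service is reachable at the given URI; the helper name is hypothetical.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/system"
)

// pruneExternal asks the podman service to remove container storage that is
// not tracked in the podman database, using the new External option.
func pruneExternal(uri string) error {
	connCtx, err := bindings.NewConnection(context.Background(), uri)
	if err != nil {
		return err
	}
	opts := new(system.PruneOptions).WithExternal(true)
	report, err := system.Prune(connCtx, opts)
	if err != nil {
		return err
	}
	fmt.Printf("prune report: %+v\n", report)
	return nil
}
```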

View File

@ -50,9 +50,10 @@ type PodmanConfig struct {
Trace bool // Hidden: Trace execution
URI string // URI to RESTful API Service
Runroot string
StorageDriver string
StorageOpts []string
SSHMode string
MachineMode bool
Runroot string
StorageDriver string
StorageOpts []string
SSHMode string
MachineMode bool
TransientStore bool
}

View File

@ -18,9 +18,10 @@ type ServiceOptions struct {
// SystemPruneOptions provides options to prune system.
type SystemPruneOptions struct {
All bool
Volume bool
Filters map[string][]string `json:"filters" schema:"filters"`
All bool
Volume bool
Filters map[string][]string `json:"filters" schema:"filters"`
External bool
}
// SystemPruneReport provides report after system prune is executed.

View File

@ -160,6 +160,18 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
// SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images.
func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
var systemPruneReport = new(entities.SystemPruneReport)
if options.External {
if options.All || options.Volume || len(options.Filters) > 0 {
return nil, fmt.Errorf("system prune --external cannot be combined with other options")
}
err := ic.Libpod.GarbageCollect()
if err != nil {
return nil, err
}
return systemPruneReport, nil
}
filters := []string{}
for k, v := range options.Filters {
filters = append(filters, fmt.Sprintf("%s=%s", k, v[0]))

View File

@ -171,6 +171,9 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
storageOpts.GraphDriverOptions = cfg.StorageOpts
}
}
if fs.Changed("transient-store") {
options = append(options, libpod.WithTransientStore(cfg.TransientStore))
}
if opts.migrate {
options = append(options, libpod.WithMigrate())
if opts.name != "" {

View File

@ -19,7 +19,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
// SystemPrune prunes unused data from the system.
func (ic *ContainerEngine) SystemPrune(ctx context.Context, opts entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
options := new(system.PruneOptions).WithAll(opts.All).WithVolumes(opts.Volume).WithFilters(opts.Filters)
options := new(system.PruneOptions).WithAll(opts.All).WithVolumes(opts.Volume).WithFilters(opts.Filters).WithExternal(opts.External)
return system.Prune(ic.ClientCtx, options)
}

View File

@ -282,6 +282,7 @@ func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *conf
"--network-config-dir", config.Network.NetworkConfigDir,
"--network-backend", config.Network.NetworkBackend,
"--volumepath", config.Engine.VolumePath,
fmt.Sprintf("--transient-store=%t", storageConfig.TransientStore),
}
if config.Engine.OCIRuntime != "" {
command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...)

View File

@ -3,6 +3,7 @@ package integration
import (
"fmt"
"os"
"path/filepath"
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo"
@ -522,4 +523,87 @@ var _ = Describe("Podman prune", func() {
podmanTest.Cleanup()
})
It("podman system prune --all --external fails", func() {
prune := podmanTest.Podman([]string{"system", "prune", "--all", "--enternal"})
prune.WaitWithDefaultTimeout()
Expect(prune).Should(Exit(125))
})
It("podman system prune --external leaves referenced containers", func() {
containerStorageDir := filepath.Join(podmanTest.Root, podmanTest.ImageCacheFS+"-containers")
create := podmanTest.Podman([]string{"create", "--name", "test", BB})
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
// Container should exist
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
// have: containers.json, containers.lock and container dir
dirents, err := os.ReadDir(containerStorageDir)
Expect(err).To(BeNil())
Expect(dirents).To(HaveLen(3))
prune := podmanTest.Podman([]string{"system", "prune", "--external", "-f"})
prune.WaitWithDefaultTimeout()
Expect(prune).Should(Exit(0))
// Container should still exist
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
// still have: containers.json, containers.lock and container dir
dirents, err = os.ReadDir(containerStorageDir)
Expect(err).To(BeNil())
Expect(dirents).To(HaveLen(3))
})
It("podman system prune --external removes unreferenced containers", func() {
SkipIfRemote("Can't drop database while daemon running")
containerStorageDir := filepath.Join(podmanTest.Root, podmanTest.ImageCacheFS+"-containers")
dbDir := filepath.Join(podmanTest.Root, "libpod")
// Create container 1
create := podmanTest.Podman([]string{"create", "--name", "test", BB})
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
// containers.json, containers.lock and container 1 dir
dirents, err := os.ReadDir(containerStorageDir)
Expect(err).To(BeNil())
Expect(dirents).To(HaveLen(3))
// Drop podman database and storage, losing track of container 1 (but directory remains)
err = os.Remove(filepath.Join(containerStorageDir, "containers.json"))
Expect(err).To(BeNil())
err = os.RemoveAll(dbDir)
Expect(err).To(BeNil())
Expect(podmanTest.NumberOfContainers()).To(Equal(0))
// Create container 2
create = podmanTest.Podman([]string{"create", "--name", "test", BB})
create.WaitWithDefaultTimeout()
Expect(create).Should(Exit(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
// containers.json, containers.lock and container 1&2 dir
dirents, err = os.ReadDir(containerStorageDir)
Expect(err).To(BeNil())
Expect(dirents).To(HaveLen(4))
prune := podmanTest.Podman([]string{"system", "prune", "--external", "-f"})
prune.WaitWithDefaultTimeout()
Expect(prune).Should(Exit(0))
// container 1 dir should be gone now
dirents, err = os.ReadDir(containerStorageDir)
Expect(err).To(BeNil())
Expect(dirents).To(HaveLen(3))
})
})

View File

@ -0,0 +1,90 @@
package integration
import (
"os"
"path/filepath"
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("Podman run with transient store", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
containerStorageDir string
dbDir string
runContainerStorageDir string
runDBDir string
)
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
containerStorageDir = filepath.Join(podmanTest.Root, podmanTest.ImageCacheFS+"-containers")
dbDir = filepath.Join(podmanTest.Root, "libpod")
runContainerStorageDir = filepath.Join(podmanTest.RunRoot, podmanTest.ImageCacheFS+"-containers")
runDBDir = tempdir
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman run with no transient-store", func() {
session := podmanTest.Podman([]string{"run", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
_ = SystemExec("ls", []string{"-l", containerStorageDir})
// All files should be in the permanent store, not the volatile one
Expect(filepath.Join(containerStorageDir, "containers.json")).Should(BeARegularFile())
Expect(filepath.Join(containerStorageDir, "volatile-containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runContainerStorageDir, "containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runContainerStorageDir, "volatile-containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(dbDir, "bolt_state.db")).Should(BeARegularFile())
Expect(filepath.Join(runDBDir, "bolt_state.db")).Should(Not(BeAnExistingFile()))
})
It("podman run --rm with no transient-store", func() {
session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
// All files should be in the permanent store, but in the volatile variant
Expect(filepath.Join(containerStorageDir, "containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(containerStorageDir, "volatile-containers.json")).Should(BeARegularFile())
Expect(filepath.Join(runContainerStorageDir, "containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runContainerStorageDir, "volatile-containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(dbDir, "bolt_state.db")).Should(BeARegularFile())
Expect(filepath.Join(runDBDir, "bolt_state.db")).Should(Not(BeAnExistingFile()))
})
It("podman run --transient-store", func() {
SkipIfRemote("Can't change store options remotely")
session := podmanTest.Podman([]string{"run", "--transient-store", ALPINE, "true"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
// All files should be in runroot store, volatile
Expect(filepath.Join(containerStorageDir, "containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(containerStorageDir, "volatile-containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runContainerStorageDir, "containers.json")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runContainerStorageDir, "volatile-containers.json")).Should(BeARegularFile())
Expect(filepath.Join(dbDir, "bolt_state.db")).Should(Not(BeAnExistingFile()))
Expect(filepath.Join(runDBDir, "bolt_state.db")).Should(BeARegularFile())
})
})