vendor buildah, image, storage, cni

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
This commit is contained in:
Valentin Rothberg
2019-03-28 10:30:09 +01:00
parent e7a2eecf5f
commit a5443a532b
79 changed files with 1875 additions and 1110 deletions

View File

@ -7,11 +7,11 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util" "github.com/containers/buildah/util"
"github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime" "github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/chrootuser"
"github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"

View File

@ -6,6 +6,7 @@ import (
"strings" "strings"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -95,22 +96,26 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
return err return err
} }
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return err
}
if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)", if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)",
rt.config.StorageConfig.RunRoot, runRootKey, rt.config.StorageConfig.RunRoot, runRootKey,
storage.DefaultStoreOptions.RunRoot); err != nil { storeOpts.RunRoot); err != nil {
return err return err
} }
if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)", if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)",
rt.config.StorageConfig.GraphRoot, graphRootKey, rt.config.StorageConfig.GraphRoot, graphRootKey,
storage.DefaultStoreOptions.GraphRoot); err != nil { storeOpts.GraphRoot); err != nil {
return err return err
} }
if err := validateDBAgainstConfig(configBkt, "storage graph driver", if err := validateDBAgainstConfig(configBkt, "storage graph driver",
rt.config.StorageConfig.GraphDriverName, rt.config.StorageConfig.GraphDriverName,
graphDriverKey, graphDriverKey,
storage.DefaultStoreOptions.GraphDriverName); err != nil { storeOpts.GraphDriverName); err != nil {
return err return err
} }

View File

@ -820,7 +820,7 @@ func (c *Container) makeBindMounts() error {
} }
// Add Secret Mounts // Add Secret Mounts
secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID()) secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID(), rootless.IsRootless())
for _, mount := range secretMounts { for _, mount := range secretMounts {
if _, ok := c.state.BindMounts[mount.Destination]; !ok { if _, ok := c.state.BindMounts[mount.Destination]; !ok {
c.state.BindMounts[mount.Destination] = mount.Source c.state.BindMounts[mount.Destination] = mount.Source

View File

@ -13,8 +13,8 @@ import (
"github.com/containers/buildah" "github.com/containers/buildah"
"github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils" "github.com/containers/libpod/utils"
"github.com/containers/storage"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -116,12 +116,17 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
func (r *Runtime) storeInfo() (map[string]interface{}, error) { func (r *Runtime) storeInfo() (map[string]interface{}, error) {
// lets say storage driver in use, number of images, number of containers // lets say storage driver in use, number of images, number of containers
info := map[string]interface{}{} info := map[string]interface{}{}
info["ConfigFile"] = util.StorageConfigFile()
info["GraphRoot"] = r.store.GraphRoot() info["GraphRoot"] = r.store.GraphRoot()
info["RunRoot"] = r.store.RunRoot() info["RunRoot"] = r.store.RunRoot()
info["GraphDriverName"] = r.store.GraphDriverName() info["GraphDriverName"] = r.store.GraphDriverName()
info["GraphOptions"] = r.store.GraphOptions() info["GraphOptions"] = r.store.GraphOptions()
info["VolumePath"] = r.config.VolumePath info["VolumePath"] = r.config.VolumePath
configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
return nil, err
}
info["ConfigFile"] = configFile
statusPairs, err := r.store.Status() statusPairs, err := r.store.Status()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -9,6 +9,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/libpod/pkg/namespaces" "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni" "github.com/cri-o/ocicni/pkg/ocicni"
@ -82,11 +83,15 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
// or graphdriveroptions are set, then GraphRoot and RunRoot // or graphdriveroptions are set, then GraphRoot and RunRoot
// must be set // must be set
if setField { if setField {
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return err
}
if rt.config.StorageConfig.GraphRoot == "" { if rt.config.StorageConfig.GraphRoot == "" {
rt.config.StorageConfig.GraphRoot = storage.DefaultStoreOptions.GraphRoot rt.config.StorageConfig.GraphRoot = storeOpts.GraphRoot
} }
if rt.config.StorageConfig.RunRoot == "" { if rt.config.StorageConfig.RunRoot == "" {
rt.config.StorageConfig.RunRoot = storage.DefaultStoreOptions.RunRoot rt.config.StorageConfig.RunRoot = storeOpts.RunRoot
} }
} }

View File

@ -248,11 +248,16 @@ type runtimeConfiguredFrom struct {
noPivotRoot bool noPivotRoot bool
} }
var ( func defaultRuntimeConfig() (RuntimeConfig, error) {
defaultRuntimeConfig = RuntimeConfig{ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return RuntimeConfig{}, err
}
return RuntimeConfig{
// Leave this empty so containers/storage will use its defaults // Leave this empty so containers/storage will use its defaults
StorageConfig: storage.StoreOptions{}, StorageConfig: storage.StoreOptions{},
VolumePath: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "volumes"), VolumePath: filepath.Join(storeOpts.GraphRoot, "volumes"),
ImageDefaultTransport: DefaultTransport, ImageDefaultTransport: DefaultTransport,
StateType: BoltDBStateStore, StateType: BoltDBStateStore,
OCIRuntime: "runc", OCIRuntime: "runc",
@ -281,7 +286,7 @@ var (
}, },
InitPath: DefaultInitPath, InitPath: DefaultInitPath,
CgroupManager: SystemdCgroupsManager, CgroupManager: SystemdCgroupsManager,
StaticDir: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "libpod"), StaticDir: filepath.Join(storeOpts.GraphRoot, "libpod"),
TmpDir: "", TmpDir: "",
MaxLogSize: -1, MaxLogSize: -1,
NoPivotRoot: false, NoPivotRoot: false,
@ -292,8 +297,8 @@ var (
EnablePortReservation: true, EnablePortReservation: true,
EnableLabeling: true, EnableLabeling: true,
NumLocks: 2048, NumLocks: 2048,
} }, nil
) }
func getDefaultTmpDir() (string, error) { func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() { if !rootless.IsRootless() {
@ -354,12 +359,17 @@ func newRuntimeFromConfig(userConfigPath string, options ...RuntimeOption) (runt
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := JSONDeepCopy(defaultRuntimeConfig, runtime.config); err != nil {
defRunConf, err := defaultRuntimeConfig()
if err != nil {
return nil, err
}
if err := JSONDeepCopy(defRunConf, runtime.config); err != nil {
return nil, errors.Wrapf(err, "error copying runtime default config") return nil, errors.Wrapf(err, "error copying runtime default config")
} }
runtime.config.TmpDir = tmpDir runtime.config.TmpDir = tmpDir
storageConf, err := util.GetDefaultStoreOptions() storageConf, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error retrieving storage config") return nil, errors.Wrapf(err, "error retrieving storage config")
} }
@ -508,7 +518,10 @@ func newRuntimeFromConfig(userConfigPath string, options ...RuntimeOption) (runt
} }
if rootlessConfigPath != "" { if rootlessConfigPath != "" {
// storage.conf // storage.conf
storageConfFile := util.StorageConfigFile() storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
return nil, err
}
if _, err := os.Stat(storageConfFile); os.IsNotExist(err) { if _, err := os.Stat(storageConfFile); os.IsNotExist(err) {
if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil { if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil {
return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile) return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile)

View File

@ -3,7 +3,6 @@ package util
import ( import (
"fmt" "fmt"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"syscall" "syscall"
@ -241,25 +240,6 @@ func GetRootlessDirInfo() (string, string, error) {
return dataDir, rootlessRuntime, nil return dataDir, rootlessRuntime, nil
} }
// GetRootlessStorageOpts returns the storage opts for containers running as non root
func GetRootlessStorageOpts() (storage.StoreOptions, error) {
var opts storage.StoreOptions
dataDir, rootlessRuntime, err := GetRootlessDirInfo()
if err != nil {
return opts, err
}
opts.RunRoot = rootlessRuntime
opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
opts.GraphDriverName = "overlay"
opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
} else {
opts.GraphDriverName = "vfs"
}
return opts, nil
}
type tomlOptionsConfig struct { type tomlOptionsConfig struct {
MountProgram string `toml:"mount_program"` MountProgram string `toml:"mount_program"`
} }
@ -289,42 +269,6 @@ func getTomlStorage(storeOptions *storage.StoreOptions) *tomlConfig {
return config return config
} }
// GetDefaultStoreOptions returns the default storage ops for containers
func GetDefaultStoreOptions() (storage.StoreOptions, error) {
var (
defaultRootlessRunRoot string
defaultRootlessGraphRoot string
err error
)
storageOpts := storage.DefaultStoreOptions
if rootless.IsRootless() {
storageOpts, err = GetRootlessStorageOpts()
if err != nil {
return storageOpts, err
}
}
storageConf := StorageConfigFile()
if _, err = os.Stat(storageConf); err == nil {
defaultRootlessRunRoot = storageOpts.RunRoot
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = storage.StoreOptions{}
storage.ReloadConfigurationFile(storageConf, &storageOpts)
}
if rootless.IsRootless() && err == nil {
// If the file did not specify a graphroot or runroot,
// set sane defaults so we don't try and use root-owned
// directories
if storageOpts.RunRoot == "" {
storageOpts.RunRoot = defaultRootlessRunRoot
}
if storageOpts.GraphRoot == "" {
storageOpts.GraphRoot = defaultRootlessGraphRoot
}
}
return storageOpts, nil
}
// WriteStorageConfigFile writes the configuration to a file // WriteStorageConfigFile writes the configuration to a file
func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf string) error { func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf string) error {
os.MkdirAll(filepath.Dir(storageConf), 0755) os.MkdirAll(filepath.Dir(storageConf), 0755)
@ -342,14 +286,6 @@ func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf strin
return nil return nil
} }
// StorageConfigFile returns the path to the storage config file used
func StorageConfigFile() string {
if rootless.IsRootless() {
return filepath.Join(os.Getenv("HOME"), ".config/containers/storage.conf")
}
return storage.DefaultConfigFile
}
// ParseInputTime takes the users input and to determine if it is valid and // ParseInputTime takes the users input and to determine if it is valid and
// returns a time format and error. The input is compared to known time formats // returns a time format and error. The input is compared to known time formats
// or a duration which implies no-duration // or a duration which implies no-duration

View File

@ -13,16 +13,16 @@ github.com/buger/goterm c206103e1f37c0c6c5c039706305ea2aa6e8ad3b
github.com/checkpoint-restore/go-criu v3.11 github.com/checkpoint-restore/go-criu v3.11
github.com/containerd/cgroups 39b18af02c4120960f517a3a4c2588fabb61d02c github.com/containerd/cgroups 39b18af02c4120960f517a3a4c2588fabb61d02c
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-alpha1 github.com/containernetworking/cni v0.7.0-rc2
github.com/containernetworking/plugins v0.7.4 github.com/containernetworking/plugins v0.7.4
github.com/containers/image v1.5 github.com/containers/image v1.5.1
github.com/vbauerster/mpb v3.3.4 github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4 github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma v1.1.1
github.com/containers/storage v1.10 github.com/containers/storage v1.12.1
github.com/containers/psgo v1.2 github.com/containers/psgo v1.2
github.com/coreos/go-systemd v14 github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c github.com/cri-o/ocicni 0c180f981b27ef6036fa5be29bcb4dd666e406eb
github.com/cyphar/filepath-securejoin v0.2.1 github.com/cyphar/filepath-securejoin v0.2.1
github.com/davecgh/go-spew v1.1.0 github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@ -93,7 +93,7 @@ k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apim
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199 github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199
github.com/containers/buildah 3ba8822d309128f7d76599432b8d9cdf77d4032f github.com/containers/buildah c933fe4bc608676d308ffcb276b7d8561a18e94d
# TODO: Gotty has not been updated since 2012. Can we find replacement? # TODO: Gotty has not been updated since 2012. Can we find replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/fsouza/go-dockerclient v1.3.0 github.com/fsouza/go-dockerclient v1.3.0

View File

@ -9,9 +9,9 @@
# Community Sync Meeting # Community Sync Meeting
There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc). There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc).
The next meeting will be held on *Wednesday, October 4th* at *3:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2017-10-04&sln=15-16). The next meeting will be held on *Wednesday, January 30th, 2019* at *4:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2019-01-30&sln=16-17).
--- ---
@ -38,11 +38,13 @@ To avoid duplication, we think it is prudent to define a common interface betwee
## Who is using CNI? ## Who is using CNI?
### Container runtimes ### Container runtimes
- [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html) - [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html)
- [Kubernetes - a system to simplify container operations](http://kubernetes.io/docs/admin/network-plugins/) - [Kubernetes - a system to simplify container operations](https://kubernetes.io/docs/admin/network-plugins/)
- [OpenShift - Kubernetes with additional enterprise features](https://github.com/openshift/origin/blob/master/docs/openshift_networking_requirements.md) - [OpenShift - Kubernetes with additional enterprise features](https://github.com/openshift/origin/blob/master/docs/openshift_networking_requirements.md)
- [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/cf-networking-release) - [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/cf-networking-release)
- [Apache Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md) - [Apache Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md)
- [Amazon ECS - a highly scalable, high performance container management service](https://aws.amazon.com/ecs/) - [Amazon ECS - a highly scalable, high performance container management service](https://aws.amazon.com/ecs/)
- [Singularity - container platform optimized for HPC, EPC, and AI](https://github.com/sylabs/singularity)
- [OpenSVC - orchestrator for legacy and containerized application stacks](https://docs.opensvc.com/latest/fr/agent.configure.cni.html)
### 3rd party plugins ### 3rd party plugins
- [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni) - [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni)
@ -61,6 +63,10 @@ To avoid duplication, we think it is prudent to define a common interface betwee
- [Amazon ECS CNI Plugins - a collection of CNI Plugins to configure containers with Amazon EC2 elastic network interfaces (ENIs)](https://github.com/aws/amazon-ecs-cni-plugins) - [Amazon ECS CNI Plugins - a collection of CNI Plugins to configure containers with Amazon EC2 elastic network interfaces (ENIs)](https://github.com/aws/amazon-ecs-cni-plugins)
- [Bonding CNI - a Link aggregating plugin to address failover and high availability network](https://github.com/Intel-Corp/bond-cni) - [Bonding CNI - a Link aggregating plugin to address failover and high availability network](https://github.com/Intel-Corp/bond-cni)
- [ovn-kubernetes - a container network plugin built on Open vSwitch (OVS) and Open Virtual Networking (OVN) with support for both Linux and Windows](https://github.com/openvswitch/ovn-kubernetes) - [ovn-kubernetes - a container network plugin built on Open vSwitch (OVS) and Open Virtual Networking (OVN) with support for both Linux and Windows](https://github.com/openvswitch/ovn-kubernetes)
- [Juniper Contrail](https://www.juniper.net/cloud) / [TungstenFabric](https://tungstenfabric.io) - Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, simultaneous overlay-underlay support, network policy enforcement, network isolation, service chaining and flexible load balancing
- [Knitter - a CNI plugin supporting multiple networking for Kubernetes](https://github.com/ZTE/Knitter)
- [DANM - a CNI-compliant networking solution for TelCo workloads running on Kubernetes](https://github.com/nokia/danm)
- [VMware NSX a CNI plugin that enables automated NSX L2/L3 networking and L4/L7 Load Balancing; network isolation at the pod, node, and cluster level; and zero-trust security policy for your Kubernetes cluster.](https://docs.vmware.com/en/VMware-NSX-T/2.2/com.vmware.nsxt.ncp_kubernetes.doc/GUID-6AFA724E-BB62-4693-B95C-321E8DDEA7E1.html)
The CNI team also maintains some [core plugins in a separate repository](https://github.com/containernetworking/plugins). The CNI team also maintains some [core plugins in a separate repository](https://github.com/containernetworking/plugins).
@ -74,7 +80,7 @@ If you intend to contribute to code or documentation, please read [CONTRIBUTING.
### Requirements ### Requirements
The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. Our [automated tests](https://travis-ci.org/containernetworking/cni/builds) cover Go versions 1.7 and 1.8. The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. You can find the Go versions covered by our [automated tests](https://travis-ci.org/containernetworking/cni/builds) in [.travis.yml](.travis.yml).
### Reference Plugins ### Reference Plugins
@ -111,6 +117,7 @@ EOF
$ cat >/etc/cni/net.d/99-loopback.conf <<EOF $ cat >/etc/cni/net.d/99-loopback.conf <<EOF
{ {
"cniVersion": "0.2.0", "cniVersion": "0.2.0",
"name": "lo",
"type": "loopback" "type": "loopback"
} }
EOF EOF
@ -122,7 +129,7 @@ Next, build the plugins:
```bash ```bash
$ cd $GOPATH/src/github.com/containernetworking/plugins $ cd $GOPATH/src/github.com/containernetworking/plugins
$ ./build.sh $ ./build_linux.sh # or build_windows.sh
``` ```
Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network: Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network:

View File

@ -15,6 +15,7 @@
package libcni package libcni
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -57,20 +58,25 @@ type NetworkConfig struct {
} }
type NetworkConfigList struct { type NetworkConfigList struct {
Name string Name string
CNIVersion string CNIVersion string
Plugins []*NetworkConfig DisableCheck bool
Bytes []byte Plugins []*NetworkConfig
Bytes []byte
} }
type CNI interface { type CNI interface {
AddNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
GetNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
DelNetworkList(net *NetworkConfigList, rt *RuntimeConf) error DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
DelNetwork(net *NetworkConfig, rt *RuntimeConf) error DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
} }
type CNIConfig struct { type CNIConfig struct {
@ -120,7 +126,7 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ
// These capabilities arguments are filtered through the plugin's advertised // These capabilities arguments are filtered through the plugin's advertised
// capabilities from its config JSON, and any keys in the CapabilityArgs // capabilities from its config JSON, and any keys in the CapabilityArgs
// matching plugin capabilities are added to the "runtimeConfig" dictionary // matching plugin capabilities are added to the "runtimeConfig" dictionary
// sent to the plugin via JSON on stdin. For exmaple, if the plugin's // sent to the plugin via JSON on stdin. For example, if the plugin's
// capabilities include "portMappings", and the CapabilityArgs map includes a // capabilities include "portMappings", and the CapabilityArgs map includes a
// "portMappings" key, that key and its value are added to the "runtimeConfig" // "portMappings" key, that key and its value are added to the "runtimeConfig"
// dictionary to be passed to the plugin's stdin. // dictionary to be passed to the plugin's stdin.
@ -158,40 +164,12 @@ func (c *CNIConfig) ensureExec() invoke.Exec {
return c.exec return c.exec
} }
func (c *CNIConfig) addOrGetNetwork(command, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil {
return nil, err
}
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
if err != nil {
return nil, err
}
return invoke.ExecPluginWithResult(pluginPath, newConf.Bytes, c.args(command, rt), c.exec)
}
// Note that only GET requests should pass an initial prevResult
func (c *CNIConfig) addOrGetNetworkList(command string, prevResult types.Result, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
var err error
for _, net := range list.Plugins {
prevResult, err = c.addOrGetNetwork(command, list.Name, list.CNIVersion, net, prevResult, rt)
if err != nil {
return nil, err
}
}
return prevResult, nil
}
func getResultCacheFilePath(netName string, rt *RuntimeConf) string { func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
cacheDir := rt.CacheDir cacheDir := rt.CacheDir
if cacheDir == "" { if cacheDir == "" {
cacheDir = CacheDir cacheDir = CacheDir
} }
return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s", netName, rt.ContainerID)) return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
} }
func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error { func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
@ -243,37 +221,52 @@ func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result,
return result, err return result, err
} }
// AddNetworkList executes a sequence of plugins with the ADD command // GetNetworkListCachedResult returns the cached Result of the
func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { // previous AddNetworkList() operation for a network list, or an error.
result, err := c.addOrGetNetworkList("ADD", nil, list, rt) func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
return getCachedResult(list.Name, list.CNIVersion, rt)
}
// GetNetworkCachedResult returns the cached Result of the
// previous AddNetwork() operation for a network, or an error.
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
}
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
if err != nil {
return nil, err
}
return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
}
// AddNetworkList executes a sequence of plugins with the ADD command
func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
var err error
var result types.Result
for _, net := range list.Plugins {
result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
if err != nil {
return nil, err
}
}
if err = setCachedResult(result, list.Name, rt); err != nil { if err = setCachedResult(result, list.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network '%s' cached result: %v", list.Name, err) return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
} }
return result, nil return result, nil
} }
// GetNetworkList executes a sequence of plugins with the GET command func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
func (c *CNIConfig) GetNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
// GET was added in CNI spec version 0.4.0 and higher
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return nil, err
} else if !gtet {
return nil, fmt.Errorf("configuration version %q does not support the GET command", list.CNIVersion)
}
cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return nil, fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err)
}
return c.addOrGetNetworkList("GET", cachedResult, list, rt)
}
func (c *CNIConfig) delNetwork(name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
c.ensureExec() c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil { if err != nil {
@ -285,11 +278,53 @@ func (c *CNIConfig) delNetwork(name, cniVersion string, net *NetworkConfig, prev
return err return err
} }
return invoke.ExecPluginWithoutResult(pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
}
// CheckNetworkList executes a sequence of plugins with the CHECK command
func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
// CHECK was added in CNI spec version 0.4.0 and higher
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return err
} else if !gtet {
return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
}
if list.DisableCheck {
return nil
}
cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
for _, net := range list.Plugins {
if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
return err
}
}
return nil
}
func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil {
return err
}
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
if err != nil {
return err
}
return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
} }
// DelNetworkList executes a sequence of plugins with the DEL command // DelNetworkList executes a sequence of plugins with the DEL command
func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) error { func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
var cachedResult types.Result var cachedResult types.Result
// Cached result on DEL was added in CNI spec version 0.4.0 and higher // Cached result on DEL was added in CNI spec version 0.4.0 and higher
@ -298,13 +333,13 @@ func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) err
} else if gtet { } else if gtet {
cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil { if err != nil {
return fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err) return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
} }
} }
for i := len(list.Plugins) - 1; i >= 0; i-- { for i := len(list.Plugins) - 1; i >= 0; i-- {
net := list.Plugins[i] net := list.Plugins[i]
if err := c.delNetwork(list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
return err return err
} }
} }
@ -314,37 +349,37 @@ func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) err
} }
// AddNetwork executes the plugin with the ADD command // AddNetwork executes the plugin with the ADD command
func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
result, err := c.addOrGetNetwork("ADD", net.Network.Name, net.Network.CNIVersion, net, nil, rt) result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = setCachedResult(result, net.Network.Name, rt); err != nil { if err = setCachedResult(result, net.Network.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network '%s' cached result: %v", net.Network.Name, err) return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err)
} }
return result, nil return result, nil
} }
// GetNetwork executes the plugin with the GET command // CheckNetwork executes the plugin with the CHECK command
func (c *CNIConfig) GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
// GET was added in CNI spec version 0.4.0 and higher // CHECK was added in CNI spec version 0.4.0 and higher
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
return nil, err return err
} else if !gtet { } else if !gtet {
return nil, fmt.Errorf("configuration version %q does not support the GET command", net.Network.CNIVersion) return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
} }
cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
} }
return c.addOrGetNetwork("GET", net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
} }
// DelNetwork executes the plugin with the DEL command // DelNetwork executes the plugin with the DEL command
func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error { func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
var cachedResult types.Result var cachedResult types.Result
// Cached result on DEL was added in CNI spec version 0.4.0 and higher // Cached result on DEL was added in CNI spec version 0.4.0 and higher
@ -353,27 +388,99 @@ func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error {
} else if gtet { } else if gtet {
cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil { if err != nil {
return fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
} }
} }
if err := c.delNetwork(net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
return err return err
} }
_ = delCachedResult(net.Network.Name, rt) _ = delCachedResult(net.Network.Name, rt)
return nil return nil
} }
// ValidateNetworkList checks that a configuration is reasonably valid.
// - all the specified plugins exist on disk
// - every plugin supports the desired version.
//
// Returns a list of all capabilities supported by the configuration, or error
func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
	// NOTE: locals deliberately avoid shadowing the `version` package and the
	// receiver `c`, unlike a naive transcription would.
	cniVersion := list.CNIVersion

	// Collect capabilities into a set so duplicates across plugins collapse,
	// and gather every plugin validation error before reporting.
	seenCaps := map[string]interface{}{}
	var errs []error
	for _, plugin := range list.Plugins {
		if err := c.validatePlugin(ctx, plugin.Network.Type, cniVersion); err != nil {
			errs = append(errs, err)
		}
		for capName, enabled := range plugin.Network.Capabilities {
			if enabled {
				seenCaps[capName] = struct{}{}
			}
		}
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("%v", errs)
	}

	// Flatten the set into a slice (order is unspecified).
	caps := make([]string, 0, len(seenCaps))
	for capName := range seenCaps {
		caps = append(caps, capName)
	}
	return caps, nil
}
// ValidateNetwork checks that a configuration is reasonably valid.
// It uses the same logic as ValidateNetworkList.
// Returns a list of capabilities.
func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
	// Make sure the plugin binary exists and speaks this config version.
	if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
		return nil, err
	}

	// Report only the capabilities that are actually switched on.
	caps := []string{}
	for capName, enabled := range net.Network.Capabilities {
		if enabled {
			caps = append(caps, capName)
		}
	}
	return caps, nil
}
// validatePlugin checks that an individual plugin's configuration is sane:
// the binary exists on c.Path and advertises support for expectedVersion.
func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
	pluginPath, err := invoke.FindInPath(pluginName, c.Path)
	if err != nil {
		return err
	}

	// Ask the binary which spec versions it implements.
	info, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
	if err != nil {
		return err
	}

	found := false
	for _, supported := range info.SupportedVersions() {
		if supported == expectedVersion {
			found = true
			break
		}
	}
	if !found {
		return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
	}
	return nil
}
// GetVersionInfo reports which versions of the CNI spec are supported by // GetVersionInfo reports which versions of the CNI spec are supported by
// the given plugin. // the given plugin.
func (c *CNIConfig) GetVersionInfo(pluginType string) (version.PluginInfo, error) { func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
c.ensureExec() c.ensureExec()
pluginPath, err := c.exec.FindInPath(pluginType, c.Path) pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return invoke.GetVersionInfo(pluginPath, c.exec) return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
} }
// ===== // =====

View File

@ -83,10 +83,19 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
} }
} }
disableCheck := false
if rawDisableCheck, ok := rawList["disableCheck"]; ok {
disableCheck, ok = rawDisableCheck.(bool)
if !ok {
return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
}
}
list := &NetworkConfigList{ list := &NetworkConfigList{
Name: name, Name: name,
CNIVersion: cniVersion, DisableCheck: disableCheck,
Bytes: bytes, CNIVersion: cniVersion,
Bytes: bytes,
} }
var plugins []interface{} var plugins []interface{}

View File

@ -15,6 +15,7 @@
package invoke package invoke
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@ -22,54 +23,53 @@ import (
"github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types"
) )
func delegateAddOrGet(command, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { func delegateCommon(expectedCommand, delegatePlugin string, exec Exec) (string, Exec, error) {
if exec == nil { if exec == nil {
exec = defaultExec exec = defaultExec
} }
if os.Getenv("CNI_COMMAND") != expectedCommand {
return "", nil, fmt.Errorf("CNI_COMMAND is not " + expectedCommand)
}
paths := filepath.SplitList(os.Getenv("CNI_PATH")) paths := filepath.SplitList(os.Getenv("CNI_PATH"))
pluginPath, err := exec.FindInPath(delegatePlugin, paths) pluginPath, err := exec.FindInPath(delegatePlugin, paths)
if err != nil { if err != nil {
return nil, err return "", nil, err
} }
return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv(), exec) return pluginPath, exec, nil
} }
// DelegateAdd calls the given delegate plugin with the CNI ADD action and // DelegateAdd calls the given delegate plugin with the CNI ADD action and
// JSON configuration // JSON configuration
func DelegateAdd(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
if os.Getenv("CNI_COMMAND") != "ADD" { pluginPath, realExec, err := delegateCommon("ADD", delegatePlugin, exec)
return nil, fmt.Errorf("CNI_COMMAND is not ADD") if err != nil {
return nil, err
} }
return delegateAddOrGet("ADD", delegatePlugin, netconf, exec)
return ExecPluginWithResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
} }
// DelegateGet calls the given delegate plugin with the CNI GET action and // DelegateCheck calls the given delegate plugin with the CNI CHECK action and
// JSON configuration // JSON configuration
func DelegateGet(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
if os.Getenv("CNI_COMMAND") != "GET" { pluginPath, realExec, err := delegateCommon("CHECK", delegatePlugin, exec)
return nil, fmt.Errorf("CNI_COMMAND is not GET")
}
return delegateAddOrGet("GET", delegatePlugin, netconf, exec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
func DelegateDel(delegatePlugin string, netconf []byte, exec Exec) error {
if exec == nil {
exec = defaultExec
}
if os.Getenv("CNI_COMMAND") != "DEL" {
return fmt.Errorf("CNI_COMMAND is not DEL")
}
paths := filepath.SplitList(os.Getenv("CNI_PATH"))
pluginPath, err := exec.FindInPath(delegatePlugin, paths)
if err != nil { if err != nil {
return err return err
} }
return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv(), exec) return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
pluginPath, realExec, err := delegateCommon("DEL", delegatePlugin, exec)
if err != nil {
return err
}
return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
} }

View File

@ -15,6 +15,7 @@
package invoke package invoke
import ( import (
"context"
"fmt" "fmt"
"os" "os"
@ -26,7 +27,7 @@ import (
// and executing a CNI plugin. Tests may provide a fake implementation // and executing a CNI plugin. Tests may provide a fake implementation
// to avoid writing fake plugins to temporary directories during the test. // to avoid writing fake plugins to temporary directories during the test.
type Exec interface { type Exec interface {
ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
FindInPath(plugin string, paths []string) (string, error) FindInPath(plugin string, paths []string) (string, error)
Decode(jsonBytes []byte) (version.PluginInfo, error) Decode(jsonBytes []byte) (version.PluginInfo, error)
} }
@ -72,12 +73,12 @@ type Exec interface {
// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) // return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
//} //}
func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
if exec == nil { if exec == nil {
exec = defaultExec exec = defaultExec
} }
stdoutBytes, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -92,11 +93,11 @@ func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec
return version.NewResult(confVersion, stdoutBytes) return version.NewResult(confVersion, stdoutBytes)
} }
func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
if exec == nil { if exec == nil {
exec = defaultExec exec = defaultExec
} }
_, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
return err return err
} }
@ -104,7 +105,7 @@ func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, ex
// For recent-enough plugins, it uses the information returned by the VERSION // For recent-enough plugins, it uses the information returned by the VERSION
// command. For older plugins which do not recognize that command, it reports // command. For older plugins which do not recognize that command, it reports
// version 0.1.0 // version 0.1.0
func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) { func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
if exec == nil { if exec == nil {
exec = defaultExec exec = defaultExec
} }
@ -117,7 +118,7 @@ func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) {
Path: "dummy", Path: "dummy",
} }
stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
stdoutBytes, err := exec.ExecPlugin(pluginPath, stdin, args.AsEnv()) stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
if err != nil { if err != nil {
if err.Error() == "unknown CNI_COMMAND: VERSION" { if err.Error() == "unknown CNI_COMMAND: VERSION" {
return version.PluginSupports("0.1.0"), nil return version.PluginSupports("0.1.0"), nil

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// +build darwin dragonfly freebsd linux netbsd opensbd solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris
package invoke package invoke

View File

@ -16,6 +16,7 @@ package invoke
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@ -28,17 +29,13 @@ type RawExec struct {
Stderr io.Writer Stderr io.Writer
} }
func (e *RawExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
stdout := &bytes.Buffer{} stdout := &bytes.Buffer{}
c := exec.CommandContext(ctx, pluginPath)
c := exec.Cmd{ c.Env = environ
Env: environ, c.Stdin = bytes.NewBuffer(stdinData)
Path: pluginPath, c.Stdout = stdout
Args: []string{pluginPath}, c.Stderr = e.Stderr
Stdin: bytes.NewBuffer(stdinData),
Stdout: stdout,
Stderr: e.Stderr,
}
if err := c.Run(); err != nil { if err := c.Run(); err != nil {
return nil, pluginErr(err, stdout.Bytes()) return nil, pluginErr(err, stdout.Bytes())
} }

View File

@ -17,6 +17,7 @@ package types020
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"net" "net"
"os" "os"
@ -73,11 +74,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) {
} }
func (r *Result) Print() error { func (r *Result) Print() error {
return r.PrintTo(os.Stdout)
}
func (r *Result) PrintTo(writer io.Writer) error {
data, err := json.MarshalIndent(r, "", " ") data, err := json.MarshalIndent(r, "", " ")
if err != nil { if err != nil {
return err return err
} }
_, err = os.Stdout.Write(data) _, err = writer.Write(data)
return err return err
} }

View File

@ -17,6 +17,7 @@ package current
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"net" "net"
"os" "os"
@ -75,13 +76,9 @@ func convertFrom020(result types.Result) (*Result, error) {
Gateway: oldResult.IP4.Gateway, Gateway: oldResult.IP4.Gateway,
}) })
for _, route := range oldResult.IP4.Routes { for _, route := range oldResult.IP4.Routes {
gw := route.GW
if gw == nil {
gw = oldResult.IP4.Gateway
}
newResult.Routes = append(newResult.Routes, &types.Route{ newResult.Routes = append(newResult.Routes, &types.Route{
Dst: route.Dst, Dst: route.Dst,
GW: gw, GW: route.GW,
}) })
} }
} }
@ -93,21 +90,13 @@ func convertFrom020(result types.Result) (*Result, error) {
Gateway: oldResult.IP6.Gateway, Gateway: oldResult.IP6.Gateway,
}) })
for _, route := range oldResult.IP6.Routes { for _, route := range oldResult.IP6.Routes {
gw := route.GW
if gw == nil {
gw = oldResult.IP6.Gateway
}
newResult.Routes = append(newResult.Routes, &types.Route{ newResult.Routes = append(newResult.Routes, &types.Route{
Dst: route.Dst, Dst: route.Dst,
GW: gw, GW: route.GW,
}) })
} }
} }
if len(newResult.IPs) == 0 {
return nil, fmt.Errorf("cannot convert: no valid IP addresses")
}
return newResult, nil return newResult, nil
} }
@ -206,11 +195,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) {
} }
func (r *Result) Print() error { func (r *Result) Print() error {
return r.PrintTo(os.Stdout)
}
func (r *Result) PrintTo(writer io.Writer) error {
data, err := json.MarshalIndent(r, "", " ") data, err := json.MarshalIndent(r, "", " ")
if err != nil { if err != nil {
return err return err
} }
_, err = os.Stdout.Write(data) _, err = writer.Write(data)
return err return err
} }

View File

@ -18,6 +18,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"net" "net"
"os" "os"
) )
@ -65,6 +66,9 @@ type NetConf struct {
Capabilities map[string]bool `json:"capabilities,omitempty"` Capabilities map[string]bool `json:"capabilities,omitempty"`
IPAM IPAM `json:"ipam,omitempty"` IPAM IPAM `json:"ipam,omitempty"`
DNS DNS `json:"dns"` DNS DNS `json:"dns"`
RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
PrevResult Result `json:"-"`
} }
type IPAM struct { type IPAM struct {
@ -75,15 +79,16 @@ type IPAM struct {
type NetConfList struct { type NetConfList struct {
CNIVersion string `json:"cniVersion,omitempty"` CNIVersion string `json:"cniVersion,omitempty"`
Name string `json:"name,omitempty"` Name string `json:"name,omitempty"`
Plugins []*NetConf `json:"plugins,omitempty"` DisableCheck bool `json:"disableCheck,omitempty"`
Plugins []*NetConf `json:"plugins,omitempty"`
} }
type ResultFactoryFunc func([]byte) (Result, error) type ResultFactoryFunc func([]byte) (Result, error)
// Result is an interface that provides the result of plugin execution // Result is an interface that provides the result of plugin execution
type Result interface { type Result interface {
// The highest CNI specification result verison the result supports // The highest CNI specification result version the result supports
// without having to convert // without having to convert
Version() string Version() string
@ -94,6 +99,9 @@ type Result interface {
// Prints the result in JSON format to stdout // Prints the result in JSON format to stdout
Print() error Print() error
// Prints the result in JSON format to provided writer
PrintTo(writer io.Writer) error
// Returns a JSON string representation of the result // Returns a JSON string representation of the result
String() string String() string
} }

View File

@ -86,9 +86,13 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
// minor, and micro numbers or returns an error // minor, and micro numbers or returns an error
func ParseVersion(version string) (int, int, int, error) { func ParseVersion(version string) (int, int, int, error) {
var major, minor, micro int var major, minor, micro int
if version == "" {
return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version)
}
parts := strings.Split(version, ".") parts := strings.Split(version, ".")
if len(parts) == 0 || len(parts) >= 4 { if len(parts) >= 4 {
return -1, -1, -1, fmt.Errorf("invalid version %q: too many or too few parts", version) return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version)
} }
major, err := strconv.Atoi(parts[0]) major, err := strconv.Atoi(parts[0])
@ -114,7 +118,7 @@ func ParseVersion(version string) (int, int, int, error) {
} }
// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro // GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro
// nubmers, and compares them to determine whether the first version is greater // numbers, and compares them to determine whether the first version is greater
// than or equal to the second // than or equal to the second
func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
firstMajor, firstMinor, firstMicro, err := ParseVersion(version) firstMajor, firstMinor, firstMicro, err := ParseVersion(version)

View File

@ -15,6 +15,7 @@
package version package version
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types"
@ -59,3 +60,24 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) {
return nil, fmt.Errorf("unsupported CNI result version %q", version) return nil, fmt.Errorf("unsupported CNI result version %q", version)
} }
// ParsePrevResult parses a prevResult in a NetConf structure and sets
// the NetConf's PrevResult member to the parsed Result object.
// A missing RawPrevResult is not an error; the conf is left untouched.
func ParsePrevResult(conf *types.NetConf) error {
	if conf.RawPrevResult == nil {
		return nil
	}

	// Round-trip the raw map back through JSON so the version-specific
	// result parser can decode it.
	raw, err := json.Marshal(conf.RawPrevResult)
	if err != nil {
		return fmt.Errorf("could not serialize prevResult: %v", err)
	}

	conf.RawPrevResult = nil
	if conf.PrevResult, err = NewResult(conf.CNIVersion, raw); err != nil {
		return fmt.Errorf("could not parse prevResult: %v", err)
	}
	return nil
}

View File

@ -78,21 +78,21 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
$ cat > lighttpd.sh <<"EOF" $ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x #!/bin/bash -x
ctr1=`buildah from ${1:-fedora}` ctr1=$(buildah from "${1:-fedora}")
## Get all updates and install our minimal httpd server ## Get all updates and install our minimal httpd server
buildah run $ctr1 -- dnf update -y buildah run "$ctr1" -- dnf update -y
buildah run $ctr1 -- dnf install -y lighttpd buildah run "$ctr1" -- dnf install -y lighttpd
## Include some buildtime annotations ## Include some buildtime annotations
buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1 buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"
## Run our server and expose the port ## Run our server and expose the port
buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1 buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
buildah config --port 80 $ctr1 buildah config --port 80 "$ctr1"
## Commit this container to an image name ## Commit this container to an image name
buildah commit $ctr1 ${2:-$USER/lighttpd} buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF EOF
$ chmod +x lighttpd.sh $ chmod +x lighttpd.sh

View File

@ -11,8 +11,8 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util" "github.com/containers/buildah/util"
"github.com/containers/libpod/pkg/chrootuser"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
@ -32,6 +32,10 @@ type AddAndCopyOptions struct {
// If the sources include directory trees, Hasher will be passed // If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees. // tar-format archives of the directory trees.
Hasher io.Writer Hasher io.Writer
// Excludes contents in the .dockerignore file
Excludes []string
// current directory on host
ContextDir string
} }
// addURL copies the contents of the source URL to the destination. This is // addURL copies the contents of the source URL to the destination. This is
@ -84,6 +88,7 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer)
// filesystem, optionally extracting contents of local files that look like // filesystem, optionally extracting contents of local files that look like
// non-empty archives. // non-empty archives.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error { func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
excludes := DockerIgnoreHelper(options.Excludes, options.ContextDir)
mountPoint, err := b.Mount(b.MountLabel) mountPoint, err := b.Mount(b.MountLabel)
if err != nil { if err != nil {
return err return err
@ -139,78 +144,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher) copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher)
copyWithTar := b.copyWithTar(&containerOwner, options.Hasher) copyWithTar := b.copyWithTar(&containerOwner, options.Hasher)
untarPath := b.untarPath(nil, options.Hasher) untarPath := b.untarPath(nil, options.Hasher)
for _, src := range source { err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { if err != nil {
// We assume that source is a file, and we're copying return err
// it to the destination. If the destination is
// already a directory, create a file inside of it.
// Otherwise, the destination is the file to which
// we'll save the contents.
url, err := url.Parse(src)
if err != nil {
return errors.Wrapf(err, "error parsing URL %q", src)
}
d := dest
if destfi != nil && destfi.IsDir() {
d = filepath.Join(dest, path.Base(url.Path))
}
if err = addURL(d, src, hostOwner, options.Hasher); err != nil {
return err
}
continue
}
glob, err := filepath.Glob(src)
if err != nil {
return errors.Wrapf(err, "invalid glob %q", src)
}
if len(glob) == 0 {
return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
}
for _, gsrc := range glob {
esrc, err := filepath.EvalSymlinks(gsrc)
if err != nil {
return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
}
srcfi, err := os.Stat(esrc)
if err != nil {
return errors.Wrapf(err, "error reading %q", esrc)
}
if srcfi.IsDir() {
// The source is a directory, so copy the contents of
// the source directory into the target directory. Try
// to create it first, so that if there's a problem,
// we'll discover why that won't work.
if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
return errors.Wrapf(err, "error creating directory %q", dest)
}
logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
if err = copyWithTar(esrc, dest); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
}
continue
}
if !extract || !archive.IsArchivePath(esrc) {
// This source is a file, and either it's not an
// archive, or we don't care whether or not it's an
// archive.
d := dest
if destfi != nil && destfi.IsDir() {
d = filepath.Join(dest, filepath.Base(gsrc))
}
// Copy the file, preserving attributes.
logrus.Debugf("copying %q to %q", esrc, d)
if err = copyFileWithTar(esrc, d); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, d)
}
continue
}
// We're extracting an archive into the destination directory.
logrus.Debugf("extracting contents of %q into %q", esrc, dest)
if err = untarPath(esrc, dest); err != nil {
return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
}
}
} }
return nil return nil
} }
@ -240,3 +176,160 @@ func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
} }
return u, err return u, err
} }
// DockerIgnore holds one pattern from a .dockerignore file: the absolute
// path pattern under the context directory, and whether a match means the
// file is excluded (true) or re-included via a "!" prefix (false).
type DockerIgnore struct {
	ExcludePath string
	IsExcluded  bool
}

// DockerIgnoreHelper returns the lines from a .dockerignore file with
// comments and blank lines dropped, in reverse order. The last matching
// pattern in .dockerignore decides a file's fate, so reversed order lets
// callers stop at the first hit. When any pattern survives, the
// .dockerignore file itself is appended as an excluded entry.
func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore {
	var excludes []DockerIgnore
	for i := len(lines) - 1; i >= 0; i-- {
		pattern := lines[i]
		// Skip blank lines and comment lines.
		if pattern == "" || strings.HasPrefix(pattern, "#") {
			continue
		}
		// A leading "!" negates the pattern (re-includes matches).
		negated := strings.HasPrefix(pattern, "!")
		if negated {
			pattern = pattern[1:]
		}
		excludes = append(excludes, DockerIgnore{
			ExcludePath: filepath.Join(contextDir, pattern),
			IsExcluded:  !negated,
		})
	}
	if len(excludes) > 0 {
		excludes = append(excludes, DockerIgnore{
			ExcludePath: filepath.Join(contextDir, ".dockerignore"),
			IsExcluded:  true,
		})
	}
	return excludes
}
// addHelper copies or extracts each of the source items into dest in the
// working container.  HTTP/HTTPS sources are downloaded via addURL; anything
// else is treated as a glob pattern and expanded.  Directory sources are
// copied recursively, honoring the excludes list (built from .dockerignore)
// when it is non-empty; file sources are either copied or, when extract is
// true and the file is an archive, unpacked into dest.  All data movement is
// delegated to the caller-supplied copyFileWithTar, copyWithTar, and
// untarPath helpers.
func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
	for _, src := range source {
		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
			// We assume that source is a file, and we're copying
			// it to the destination. If the destination is
			// already a directory, create a file inside of it.
			// Otherwise, the destination is the file to which
			// we'll save the contents.
			url, err := url.Parse(src)
			if err != nil {
				return errors.Wrapf(err, "error parsing URL %q", src)
			}
			d := dest
			if destfi != nil && destfi.IsDir() {
				// Name the downloaded file after the last URL path component.
				d = filepath.Join(dest, path.Base(url.Path))
			}
			if err = addURL(d, src, hostOwner, options.Hasher); err != nil {
				return err
			}
			continue
		}
		// Expand the source as a glob; it must match at least one path.
		glob, err := filepath.Glob(src)
		if err != nil {
			return errors.Wrapf(err, "invalid glob %q", src)
		}
		if len(glob) == 0 {
			return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
		}
	outer:
		for _, gsrc := range glob {
			// Resolve symlinks before stat'ing and copying.
			esrc, err := filepath.EvalSymlinks(gsrc)
			if err != nil {
				return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
			}
			srcfi, err := os.Stat(esrc)
			if err != nil {
				return errors.Wrapf(err, "error reading %q", esrc)
			}
			if srcfi.IsDir() {
				// The source is a directory, so copy the contents of
				// the source directory into the target directory. Try
				// to create it first, so that if there's a problem,
				// we'll discover why that won't work.
				if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
					return errors.Wrapf(err, "error creating directory %q", dest)
				}
				logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
				if len(excludes) == 0 {
					// No exclude rules: copy the whole tree in one shot.
					if err = copyWithTar(esrc, dest); err != nil {
						return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
					}
					continue
				}
				// Exclude rules are present: walk the tree and copy
				// files one at a time so each can be checked against them.
				err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						return err
					}
					if info.IsDir() {
						// Only individual files are copied here.
						return nil
					}
					for _, exclude := range excludes {
						match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
						if err != nil {
							return err
						}
						if !match {
							continue
						}
						// First matching rule decides: skip the file
						// if it's excluded, otherwise stop checking.
						if exclude.IsExcluded {
							return nil
						}
						break
					}
					// combine the filename with the dest directory
					// NOTE(review): assumes path lies under options.ContextDir;
					// otherwise TrimPrefix leaves it unchanged — confirm.
					fpath := strings.TrimPrefix(path, options.ContextDir)
					if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
						return errors.Wrapf(err, "error copying %q to %q", path, dest)
					}
					return nil
				})
				if err != nil {
					return err
				}
				continue
			}
			// The source is a single file; apply the exclude rules to it.
			for _, exclude := range excludes {
				match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), esrc)
				if err != nil {
					return err
				}
				if !match {
					continue
				}
				// First matching rule decides: skip this source
				// entirely if it's excluded, otherwise stop checking.
				if exclude.IsExcluded {
					continue outer
				}
				break
			}
			if !extract || !archive.IsArchivePath(esrc) {
				// This source is a file, and either it's not an
				// archive, or we don't care whether or not it's an
				// archive.
				d := dest
				if destfi != nil && destfi.IsDir() {
					// Copy into the directory under the source's base name.
					d = filepath.Join(dest, filepath.Base(gsrc))
				}
				// Copy the file, preserving attributes.
				logrus.Debugf("copying %q to %q", esrc, d)
				if err = copyFileWithTar(esrc, d); err != nil {
					return errors.Wrapf(err, "error copying %q to %q", esrc, d)
				}
				continue
			}
			// We're extracting an archive into the destination directory.
			logrus.Debugf("extracting contents of %q into %q", esrc, dest)
			if err = untarPath(esrc, dest); err != nil {
				return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
			}
		}
	}
	return nil
}

View File

@ -336,10 +336,10 @@ type BuilderOptions struct {
// needs to be pulled and the image name alone can not be resolved to a // needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added. // reference to a source image. No separator is implicitly added.
Registry string Registry string
// PullBlobDirectory is the name of a directory in which we'll attempt // BlobDirectory is the name of a directory in which we'll attempt
// to store copies of layer blobs that we pull down, if any. It should // to store copies of layer blobs that we pull down, if any. It should
// already exist. // already exist.
PullBlobDirectory string BlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted // Mount signals to NewBuilder() that the container should be mounted
// immediately. // immediately.
Mount bool Mount bool

View File

@ -114,7 +114,7 @@ type PushOptions struct {
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) { func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
var imgID string var imgID string
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath)
blocked, err := isReferenceBlocked(dest, systemContext) blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil { if err != nil {
@ -152,8 +152,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil { if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
} }
var maybeCachedSrc types.ImageReference = src var maybeCachedSrc = types.ImageReference(src)
var maybeCachedDest types.ImageReference = dest var maybeCachedDest = types.ImageReference(dest)
if options.BlobDirectory != "" { if options.BlobDirectory != "" {
compress := types.PreserveOriginal compress := types.PreserveOriginal
if options.Compression != archive.Uncompressed { if options.Compression != archive.Uncompressed {
@ -178,7 +178,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
systemContext.DirForceCompress = true systemContext.DirForceCompress = true
} }
var manifestBytes []byte var manifestBytes []byte
if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil { if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(b.store, options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID) return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
} }
if len(options.AdditionalTags) > 0 { if len(options.AdditionalTags) > 0 {
@ -230,7 +230,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Push copies the contents of the image to a new location. // Push copies the contents of the image to a new location.
func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
if options.Quiet { if options.Quiet {
options.ReportWriter = nil // Turns off logging output options.ReportWriter = nil // Turns off logging output
@ -256,7 +256,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
if err != nil { if err != nil {
return nil, "", err return nil, "", err
} }
var maybeCachedSrc types.ImageReference = src var maybeCachedSrc = types.ImageReference(src)
if options.BlobDirectory != "" { if options.BlobDirectory != "" {
compress := types.PreserveOriginal compress := types.PreserveOriginal
if options.Compression != archive.Uncompressed { if options.Compression != archive.Uncompressed {
@ -276,7 +276,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true systemContext.DirForceCompress = true
} }
var manifestBytes []byte var manifestBytes []byte
if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil { if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.Store, options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest)) return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
} }
if options.ReportWriter != nil { if options.ReportWriter != nil {

View File

@ -5,9 +5,10 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/containers/buildah/unshare"
cp "github.com/containers/image/copy" cp "github.com/containers/image/copy"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/libpod/pkg/rootless" "github.com/containers/storage"
) )
const ( const (
@ -17,33 +18,16 @@ const (
DOCKER = "docker" DOCKER = "docker"
) )
// userRegistriesFile is the path to the per user registry configuration file. func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf") sourceCtx := getSystemContext(store, nil, "")
func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
sourceCtx := &types.SystemContext{}
if sourceSystemContext != nil { if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext *sourceCtx = *sourceSystemContext
} else {
if rootless.IsRootless() {
if _, err := os.Stat(userRegistriesFile); err == nil {
sourceCtx.SystemRegistriesConfPath = userRegistriesFile
}
}
} }
destinationCtx := &types.SystemContext{} destinationCtx := getSystemContext(store, nil, "")
if destinationSystemContext != nil { if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext *destinationCtx = *destinationSystemContext
} else {
if rootless.IsRootless() {
if _, err := os.Stat(userRegistriesFile); err == nil {
destinationCtx.SystemRegistriesConfPath = userRegistriesFile
}
}
} }
return &cp.Options{ return &cp.Options{
ReportWriter: reportWriter, ReportWriter: reportWriter,
SourceCtx: sourceCtx, SourceCtx: sourceCtx,
@ -52,7 +36,7 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
} }
} }
func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext { func getSystemContext(store storage.Store, defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
sc := &types.SystemContext{} sc := &types.SystemContext{}
if defaults != nil { if defaults != nil {
*sc = *defaults *sc = *defaults
@ -60,11 +44,16 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" { if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath sc.SignaturePolicyPath = signaturePolicyPath
} }
if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() { if store != nil {
if _, err := os.Stat(userRegistriesFile); err == nil { if sc.BlobInfoCacheDir == "" {
sc.SystemRegistriesConfPath = userRegistriesFile sc.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache")
}
if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
if _, err := os.Stat(userRegistriesFile); err == nil {
sc.SystemRegistriesConfPath = userRegistriesFile
}
} }
} }
return sc return sc
} }

File diff suppressed because it is too large Load Diff

View File

@ -111,28 +111,3 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
func InitReexec() bool { func InitReexec() bool {
return buildah.InitReexec() return buildah.InitReexec()
} }
// ReposToMap parses the specified repotags and returns a map with repositories
// as keys and the corresponding arrays of tags as values.
func ReposToMap(repotags []string) map[string][]string {
// map format is repo -> tag
repos := make(map[string][]string)
for _, repo := range repotags {
var repository, tag string
if strings.Contains(repo, ":") {
li := strings.LastIndex(repo, ":")
repository = repo[0:li]
tag = repo[li+1:]
} else if len(repo) > 0 {
repository = repo
tag = "<none>"
} else {
logrus.Warnf("Found image with empty name")
}
repos[repository] = append(repos[repository], tag)
}
if len(repos) == 0 {
repos["<none>"] = []string{"<none>"}
}
return repos
}

View File

@ -17,7 +17,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage")
} }
uidmap, gidmap := convertStorageIDMaps(storage.DefaultStoreOptions.UIDMap, storage.DefaultStoreOptions.GIDMap) storeopts, err := storage.DefaultStoreOptions(false, 0)
if err != nil {
return nil, err
}
uidmap, gidmap := convertStorageIDMaps(storeopts.UIDMap, storeopts.GIDMap)
ref, err := is.Transport.ParseStoreReference(store, imageID) ref, err := is.Transport.ParseStoreReference(store, imageID)
if err != nil { if err != nil {
@ -83,7 +87,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
return nil, err return nil, err
} }
systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath) systemContext := getSystemContext(store, &types.SystemContext{}, options.SignaturePolicyPath)
builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID) builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
if err != nil { if err != nil {
@ -115,7 +119,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
return nil, errors.Errorf("image name must be specified") return nil, errors.Errorf("image name must be specified")
} }
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
_, img, err := util.FindImage(store, "", systemContext, options.Image) _, img, err := util.FindImage(store, "", systemContext, options.Image)
if err != nil { if err != nil {

View File

@ -11,7 +11,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/containers/libpod/pkg/rootless" "github.com/containers/buildah/unshare"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -47,7 +47,7 @@ func hostInfo() (map[string]interface{}, error) {
info["os"] = runtime.GOOS info["os"] = runtime.GOOS
info["arch"] = runtime.GOARCH info["arch"] = runtime.GOARCH
info["cpus"] = runtime.NumCPU() info["cpus"] = runtime.NumCPU()
info["rootless"] = rootless.IsRootless() info["rootless"] = unshare.IsRootless()
mi, err := system.ReadMemInfo() mi, err := system.ReadMemInfo()
if err != nil { if err != nil {
logrus.Error(err, "err reading memory info") logrus.Error(err, "err reading memory info")

View File

@ -29,7 +29,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.Ima
ReportWriter: options.ReportWriter, ReportWriter: options.ReportWriter,
Store: store, Store: store,
SystemContext: options.SystemContext, SystemContext: options.SystemContext,
BlobDirectory: options.PullBlobDirectory, BlobDirectory: options.BlobDirectory,
} }
ref, err := pullImage(ctx, store, srcRef, pullOptions, sc) ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
if err != nil { if err != nil {
@ -244,7 +244,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = "" options.FromImage = ""
} }
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" { if options.FromImage != "" && options.FromImage != "scratch" {
ref, _, img, err = resolveImage(ctx, systemContext, store, options) ref, _, img, err = resolveImage(ctx, systemContext, store, options)

View File

@ -111,17 +111,13 @@ func (t StdoutTemplateArray) Out() error {
if err != nil { if err != nil {
return errors.Wrapf(err, parsingErrorStr) return errors.Wrapf(err, parsingErrorStr)
} }
for i, raw := range t.Output { for _, raw := range t.Output {
basicTmpl := tmpl.Funcs(basicFunctions) basicTmpl := tmpl.Funcs(basicFunctions)
if err := basicTmpl.Execute(w, raw); err != nil { if err := basicTmpl.Execute(w, raw); err != nil {
return errors.Wrapf(err, parsingErrorStr) return errors.Wrapf(err, parsingErrorStr)
} }
if i != len(t.Output)-1 { fmt.Fprintln(w, "")
fmt.Fprintln(w, "")
continue
}
} }
fmt.Fprintln(w, "")
return w.Flush() return w.Flush()
} }

View File

@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"net" "net"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
@ -393,25 +392,11 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti
gidmap = uidmap gidmap = uidmap
} }
useSlirp4netns := false
if isolation == buildah.IsolationOCIRootless {
_, err := exec.LookPath("slirp4netns")
if execerr, ok := err.(*exec.Error); ok && !strings.Contains(execerr.Error(), "not found") {
return nil, nil, errors.Wrapf(err, "cannot lookup slirp4netns %v", execerr)
}
if err == nil {
useSlirp4netns = true
} else {
logrus.Warningf("could not find slirp4netns. Using host network namespace")
}
}
// By default, having mappings configured means we use a user // By default, having mappings configured means we use a user
// namespace. Otherwise, we don't. // namespace. Otherwise, we don't.
usernsOption := buildah.NamespaceOption{ usernsOption := buildah.NamespaceOption{
Name: string(specs.UserNamespace), Name: string(specs.UserNamespace),
Host: len(uidmap) == 0 && len(gidmap) == 0 && !useSlirp4netns, Host: len(uidmap) == 0 && len(gidmap) == 0,
} }
// If the user specifically requested that we either use or don't use // If the user specifically requested that we either use or don't use
// user namespaces, override that default. // user namespaces, override that default.

View File

@ -7,7 +7,6 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
rspec "github.com/opencontainers/runtime-spec/specs-go" rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
@ -133,12 +132,12 @@ func getMountsMap(path string) (string, string, error) {
} }
// SecretMounts copies, adds, and mounts the secrets to the container root filesystem // SecretMounts copies, adds, and mounts the secrets to the container root filesystem
func SecretMounts(mountLabel, containerWorkingDir, mountFile string) []rspec.Mount { func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount {
return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0) return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless)
} }
// SecretMountsWithUIDGID specifies the uid/gid of the owner // SecretMountsWithUIDGID specifies the uid/gid of the owner
func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int) []rspec.Mount { func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount {
var ( var (
secretMounts []rspec.Mount secretMounts []rspec.Mount
mountFiles []string mountFiles []string
@ -148,17 +147,8 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre
// Note for testing purposes only // Note for testing purposes only
if mountFile == "" { if mountFile == "" {
mountFiles = append(mountFiles, []string{OverrideMountsFile, DefaultMountsFile}...) mountFiles = append(mountFiles, []string{OverrideMountsFile, DefaultMountsFile}...)
if rootless.IsRootless() { if rootless {
mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...) mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...)
_, err := os.Stat(UserOverrideMountsFile)
if err != nil && os.IsNotExist(err) {
os.MkdirAll(filepath.Dir(UserOverrideMountsFile), 0755)
if f, err := os.Create(UserOverrideMountsFile); err != nil {
logrus.Warnf("could not create file %s: %v", UserOverrideMountsFile, err)
} else {
f.Close()
}
}
} }
} else { } else {
mountFiles = append(mountFiles, mountFile) mountFiles = append(mountFiles, mountFile)

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"strings" "strings"
"github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/pkg/blobcache"
@ -153,13 +154,13 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
// Pull copies the contents of the image from somewhere else to local storage. // Pull copies the contents of the image from somewhere else to local storage.
func Pull(ctx context.Context, imageName string, options PullOptions) error { func Pull(ctx context.Context, imageName string, options PullOptions) error {
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
boptions := BuilderOptions{ boptions := BuilderOptions{
FromImage: imageName, FromImage: imageName,
SignaturePolicyPath: options.SignaturePolicyPath, SignaturePolicyPath: options.SignaturePolicyPath,
SystemContext: systemContext, SystemContext: systemContext,
PullBlobDirectory: options.BlobDirectory, BlobDirectory: options.BlobDirectory,
ReportWriter: options.ReportWriter, ReportWriter: options.ReportWriter,
} }
@ -236,7 +237,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error parsing image name %q", destName) return nil, errors.Wrapf(err, "error parsing image name %q", destName)
} }
var maybeCachedDestRef types.ImageReference = destRef var maybeCachedDestRef = types.ImageReference(destRef)
if options.BlobDirectory != "" { if options.BlobDirectory != "" {
cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal) cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
if err != nil { if err != nil {
@ -262,7 +263,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}() }()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName) logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil { if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(store, options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err) logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err return nil, err
} }

View File

@ -2,6 +2,7 @@ package buildah
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@ -21,6 +22,7 @@ import (
"github.com/containers/buildah/bind" "github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot" "github.com/containers/buildah/chroot"
"github.com/containers/buildah/pkg/secrets" "github.com/containers/buildah/pkg/secrets"
"github.com/containers/buildah/unshare"
"github.com/containers/buildah/util" "github.com/containers/buildah/util"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
@ -416,7 +418,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
} }
// Get the list of secrets mounts. // Get the list of secrets mounts.
secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID)) secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless())
// Add temporary copies of the contents of volume locations at the // Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there. // volume locations, unless we already have something there.
@ -1720,7 +1722,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
unix.CloseOnExec(fd) unix.CloseOnExec(fd)
} }
cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
@ -1765,7 +1767,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
var netconf, undo []*libcni.NetworkConfigList var netconf, undo []*libcni.NetworkConfigList
if isolation == IsolationOCIRootless { if isolation == IsolationOCIRootless {
if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host { if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" {
return setupRootlessNetwork(pid) return setupRootlessNetwork(pid)
} }
} }
@ -1835,7 +1837,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf)
teardown = func() { teardown = func() {
for _, nc := range undo { for _, nc := range undo {
if err = cni.DelNetworkList(nc, rtconf[nc]); err != nil { if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil {
logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err)
} }
} }
@ -1851,7 +1853,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
CapabilityArgs: map[string]interface{}{}, CapabilityArgs: map[string]interface{}{},
} }
// Bring it up. // Bring it up.
_, err := cni.AddNetworkList(nc, rtconf[nc]) _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc])
if err != nil { if err != nil {
return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command)
} }

View File

@ -11,6 +11,7 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
"sync"
"syscall" "syscall"
"github.com/containers/buildah/util" "github.com/containers/buildah/util"
@ -57,8 +58,8 @@ func (c *Cmd) Start() error {
// Please the libpod "rootless" package to find the expected env variables. // Please the libpod "rootless" package to find the expected env variables.
if os.Geteuid() != 0 { if os.Geteuid() != 0 {
c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done") c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid())) c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
} }
// Create the pipe for reading the child's PID. // Create the pipe for reading the child's PID.
@ -272,3 +273,36 @@ func (c *Cmd) CombinedOutput() ([]byte, error) {
func (c *Cmd) Output() ([]byte, error) { func (c *Cmd) Output() ([]byte, error) {
return nil, errors.New("unshare: Output() not implemented") return nil, errors.New("unshare: Output() not implemented")
} }
var (
isRootlessOnce sync.Once
isRootless bool
)
const (
// UsernsEnvName is the environment variable, if set indicates in rootless mode
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
})
return isRootless
}
// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Getuid()
}
// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=")
}

View File

@ -0,0 +1,27 @@
// +build !linux
package unshare
import (
"os"
)
const (
// UsernsEnvName is the environment variable, if set indicates in rootless mode
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
return false
}
// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
return os.Getuid()
}
// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=")
}

View File

@ -2,14 +2,13 @@ github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0 github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0 github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-alpha1 github.com/containernetworking/cni v0.7.0-rc2
github.com/containers/image v1.5 github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
github.com/vbauerster/mpb v3.3.4 github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4 github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1 github.com/boltdb/bolt v1.3.1
github.com/containers/libpod v1.0 github.com/containers/storage v1.12.1
github.com/containers/storage v1.11
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1 github.com/docker/docker-credential-helpers v0.6.1
@ -39,7 +38,7 @@ github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0 github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1 github.com/opencontainers/selinux v1.1
github.com/openshift/imagebuilder 705fe9255c57f8505efb9723a9ac4082b67973bc github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1 github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac

View File

@ -65,7 +65,7 @@ the primary downside is that creating new signatures with the Golang-only implem
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
and impossible to import when this build tag is in use. and impossible to import when this build tag is in use.
## [Contributing](CONTRIBUTING.md)** ## [Contributing](CONTRIBUTING.md)
Information about contributing to this project. Information about contributing to this project.

View File

@ -468,7 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
} }
data := make([]copyLayerData, numLayers) data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) { copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
defer copySemaphore.Release(1) defer copySemaphore.Release(1)
defer copyGroup.Done() defer copyGroup.Done()
cld := copyLayerData{} cld := copyLayerData{}
@ -483,24 +483,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
} }
} else { } else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar) cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
} }
data[index] = cld data[index] = cld
bar.SetTotal(srcLayer.Size, true)
} }
func() { // A scope for defer func() { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx) progressPool, progressCleanup := ic.c.newProgressPool(ctx)
defer progressCleanup() defer progressCleanup()
progressBars := make([]*mpb.Bar, numLayers)
for i, srcInfo := range srcInfos {
progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
}
for i, srcLayer := range srcInfos { for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1) copySemaphore.Acquire(ctx, 1)
go copyLayerHelper(i, srcLayer, progressBars[i]) go copyLayerHelper(i, srcLayer, progressPool)
} }
// Wait for all layers to be copied // Wait for all layers to be copied
@ -592,7 +586,7 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded // is ioutil.Discard, the progress bar's output will be discarded
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar { func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
// shortDigestLen is the length of the digest used for blobs. // shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12 const shortDigestLen = 12
@ -604,11 +598,12 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
} }
bar := pool.AddBar(info.Size, bar := pool.AddBar(info.Size,
mpb.BarClearOnComplete(),
mpb.PrependDecorators( mpb.PrependDecorators(
decor.Name(prefix), decor.Name(prefix),
), ),
mpb.AppendDecorators( mpb.AppendDecorators(
decor.CountersKibiByte("%.1f / %.1f"), decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
), ),
) )
if c.progressOutput == ioutil.Discard { if c.progressOutput == ioutil.Discard {
@ -629,7 +624,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool, progressCleanup := c.newProgressPool(ctx) progressPool, progressCleanup := c.newProgressPool(ctx)
defer progressCleanup() defer progressCleanup()
bar := c.createProgressBar(progressPool, srcInfo, "config") bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
if err != nil { if err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
@ -656,7 +651,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) { func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
@ -668,6 +663,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
} }
if reused { if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
bar.SetTotal(0, true)
return blobInfo, cachedDiffID, nil return blobInfo, cachedDiffID, nil
} }
} }
@ -679,10 +676,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
} }
defer srcStream.Close() defer srcStream.Close()
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
if err != nil { if err != nil {
return types.BlobInfo{}, "", err return types.BlobInfo{}, "", err
} }
diffID := cachedDiffID
if diffIDIsNeeded { if diffIDIsNeeded {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -695,11 +696,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
return blobInfo, diffIDResult.digest, nil diffID = diffIDResult.digest
} }
} else {
return blobInfo, cachedDiffID, nil
} }
bar.SetTotal(srcInfo.Size, true)
return blobInfo, diffID, nil
} }
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.

View File

@ -197,7 +197,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
registry := reference.Domain(ref.ref) registry := reference.Domain(ref.ref)
username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) username, password, err := config.GetAuthentication(sys, registry)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error getting username and password") return nil, errors.Wrapf(err, "error getting username and password")
} }

View File

@ -16,7 +16,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/api/v2"
@ -129,7 +129,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false) haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
if err != nil { if err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
} }

View File

@ -11,7 +11,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -96,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil { if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
} }
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache) stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
logrus.Debugf("Uploading empty layer during conversion to schema 1") logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that its easier to just copy it than to worry about figuring out another location where to get it. // and anyway this blob is so small that its easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false) info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer") return nil, errors.Wrap(err, "Error uploading empty layer")
} }

View File

@ -7,7 +7,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil { if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
} }
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache) stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,4 +1,5 @@
package blobinfocache // Package boltdb implements a BlobInfoCache backed by BoltDB.
package boltdb
import ( import (
"fmt" "fmt"
@ -7,6 +8,7 @@ import (
"time" "time"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -81,22 +83,23 @@ func unlockPath(path string) {
} }
} }
// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path. // cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
// //
// Note that we dont keep the database open across operations, because that would lock the file and block any other // Note that we dont keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup. // users; instead, we need to open/close it for every single write or lookup.
type boltDBCache struct { type cache struct {
path string path string
} }
// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path. // New returns a BlobInfoCache implementation which uses a BoltDB file at path.
// Most users should call DefaultCache instead. //
func NewBoltDBCache(path string) types.BlobInfoCache { // Most users should call blobinfocache.DefaultCache instead.
return &boltDBCache{path: path} func New(path string) types.BlobInfoCache {
return &cache{path: path}
} }
// view returns runs the specified fn within a read-only transaction on the database. // view returns runs the specified fn within a read-only transaction on the database.
func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) { func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
// a read lock, blocking any future writes. // a read lock, blocking any future writes.
@ -122,7 +125,7 @@ func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
} }
// update returns runs the specified fn within a read-write transaction on the database. // update returns runs the specified fn within a read-write transaction on the database.
func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) { func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
lockPath(bdc.path) lockPath(bdc.path)
defer unlockPath(bdc.path) defer unlockPath(bdc.path)
db, err := bolt.Open(bdc.path, 0600, nil) db, err := bolt.Open(bdc.path, 0600, nil)
@ -139,7 +142,7 @@ func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
} }
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. // uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
if b := tx.Bucket(uncompressedDigestBucket); b != nil { if b := tx.Bucket(uncompressedDigestBucket); b != nil {
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
d, err := digest.Parse(string(uncompressedBytes)) d, err := digest.Parse(string(uncompressedBytes))
@ -166,7 +169,7 @@ func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest)
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed. // May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
var res digest.Digest var res digest.Digest
if err := bdc.view(func(tx *bolt.Tx) error { if err := bdc.view(func(tx *bolt.Tx) error {
res = bdc.uncompressedDigest(tx, anyDigest) res = bdc.uncompressedDigest(tx, anyDigest)
@ -182,7 +185,7 @@ func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; dont record a digest pair just because some remote author claims so (e.g. // WARNING: Only call this for LOCALLY VERIFIED data; dont record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
_ = bdc.update(func(tx *bolt.Tx) error { _ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
if err != nil { if err != nil {
@ -219,7 +222,7 @@ func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data. // and can be reused given the opaque location data.
func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
_ = bdc.update(func(tx *bolt.Tx) error { _ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
if err != nil { if err != nil {
@ -248,8 +251,8 @@ func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scop
}) // FIXME? Log error (but throttle the log volume on repeated accesses)? }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
} }
// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. // appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime { func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
b := scopeBucket.Bucket([]byte(digest.String())) b := scopeBucket.Bucket([]byte(digest.String()))
if b == nil { if b == nil {
return candidates return candidates
@ -259,12 +262,12 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
if err := t.UnmarshalBinary(v); err != nil { if err := t.UnmarshalBinary(v); err != nil {
return err return err
} }
candidates = append(candidates, candidateWithTime{ candidates = append(candidates, prioritize.CandidateWithTime{
candidate: types.BICReplacementCandidate{ Candidate: types.BICReplacementCandidate{
Digest: digest, Digest: digest,
Location: types.BICLocationReference{Opaque: string(k)}, Location: types.BICLocationReference{Opaque: string(k)},
}, },
lastSeen: t, LastSeen: t,
}) })
return nil return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)? }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
@ -277,8 +280,8 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest. // uncompressed digest.
func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
res := []candidateWithTime{} res := []prioritize.CandidateWithTime{}
var uncompressedDigestValue digest.Digest // = "" var uncompressedDigestValue digest.Digest // = ""
if err := bdc.view(func(tx *bolt.Tx) error { if err := bdc.view(func(tx *bolt.Tx) error {
scopeBucket := tx.Bucket(knownLocationsBucket) scopeBucket := tx.Bucket(knownLocationsBucket)
@ -325,5 +328,5 @@ func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope
return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
} }
return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
} }

View File

@ -5,6 +5,8 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/containers/image/pkg/blobinfocache/boltdb"
"github.com/containers/image/pkg/blobinfocache/memory"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
@ -50,14 +52,14 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
dir, err := blobInfoCacheDir(sys, os.Geteuid()) dir, err := blobInfoCacheDir(sys, os.Geteuid())
if err != nil { if err != nil {
logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
return NewMemoryCache() return memory.New()
} }
path := filepath.Join(dir, blobInfoCacheFilename) path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil { if err := os.MkdirAll(dir, 0700); err != nil {
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
return NewMemoryCache() return memory.New()
} }
logrus.Debugf("Using blob info cache at %s", path) logrus.Debugf("Using blob info cache at %s", path)
return NewBoltDBCache(path) return boltdb.New(path)
} }

View File

@ -1,4 +1,6 @@
package blobinfocache // Package prioritize provides utilities for prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize
import ( import (
"sort" "sort"
@ -13,16 +15,16 @@ import (
// This is a heuristic/guess, and could well use a different value. // This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5 const replacementAttempts = 5
// candidateWithTime is the input to types.BICReplacementCandidate prioritization. // CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type candidateWithTime struct { type CandidateWithTime struct {
candidate types.BICReplacementCandidate // The replacement candidate Candidate types.BICReplacementCandidate // The replacement candidate
lastSeen time.Time // Time the candidate was last known to exist (either read or written) LastSeen time.Time // Time the candidate was last known to exist (either read or written)
} }
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, // candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less // along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct { type candidateSortState struct {
cs []candidateWithTime // The entries to sort cs []CandidateWithTime // The entries to sort
primaryDigest digest.Digest // The digest the user actually asked for primaryDigest digest.Digest // The digest the user actually asked for
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
} }
@ -40,35 +42,35 @@ func (css *candidateSortState) Less(i, j int) bool {
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
// First, deal with the primaryDigest/uncompressedDigest cases: // First, deal with the primaryDigest/uncompressedDigest cases:
if xi.candidate.Digest != xj.candidate.Digest { if xi.Candidate.Digest != xj.Candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
if xi.candidate.Digest == css.primaryDigest { if xi.Candidate.Digest == css.primaryDigest {
return true return true
} }
if xj.candidate.Digest == css.primaryDigest { if xj.Candidate.Digest == css.primaryDigest {
return false return false
} }
if css.uncompressedDigest != "" { if css.uncompressedDigest != "" {
if xi.candidate.Digest == css.uncompressedDigest { if xi.Candidate.Digest == css.uncompressedDigest {
return false return false
} }
if xj.candidate.Digest == css.uncompressedDigest { if xj.Candidate.Digest == css.uncompressedDigest {
return true return true
} }
} }
} else { // xi.candidate.Digest == xj.candidate.Digest } else { // xi.Candidate.Digest == xj.Candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
return xi.lastSeen.After(xj.lastSeen) return xi.LastSeen.After(xj.LastSeen)
} }
} }
// Neither of the digests are primaryDigest/uncompressedDigest: // Neither of the digests are primaryDigest/uncompressedDigest:
if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
return xi.lastSeen.After(xj.lastSeen) return xi.LastSeen.After(xj.LastSeen)
} }
// Fall back to digest, if timestamps end up _exactly_ the same (how?!) // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
return xi.candidate.Digest < xj.candidate.Digest return xi.Candidate.Digest < xj.Candidate.Digest
} }
func (css *candidateSortState) Swap(i, j int) { func (css *candidateSortState) Swap(i, j int) {
@ -77,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler. // number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal. // compare equal.
sort.Sort(&candidateSortState{ sort.Sort(&candidateSortState{
@ -92,17 +94,17 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime,
} }
res := make([]types.BICReplacementCandidate, resLength) res := make([]types.BICReplacementCandidate, resLength)
for i := range res { for i := range res {
res[i] = cs[i].candidate res[i] = cs[i].Candidate
} }
return res return res
} }
// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), // the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. // and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
// //
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
} }

View File

@ -1,11 +1,13 @@
package blobinfocache // Package memory implements an in-memory BlobInfoCache.
package memory
import ( import (
"sync" "sync"
"time" "time"
"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
@ -16,21 +18,25 @@ type locationKey struct {
blobDigest digest.Digest blobDigest digest.Digest
} }
// memoryCache implements an in-memory-only BlobInfoCache // cache implements an in-memory-only BlobInfoCache
type memoryCache struct { type cache struct {
mutex *sync.Mutex // synchronizes concurrent accesses mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
} }
// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only. // New returns a BlobInfoCache implementation which is in-memory only.
// This is primarily intended for tests, but also used as a fallback if DefaultCache //
// cant determine, or set up, the location for a persistent cache. // This is primarily intended for tests, but also used as a fallback
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache. // if blobinfocache.DefaultCache cant determine, or set up, the
func NewMemoryCache() types.BlobInfoCache { // location for a persistent cache. Most users should use
return &memoryCache{ // blobinfocache.DefaultCache. instead of calling this directly.
mutex: new(sync.Mutex), // Manual users of types.{ImageSource,ImageDestination} might also use
// this instead of a persistent cache.
func New() types.BlobInfoCache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{}, uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
@ -40,16 +46,14 @@ func NewMemoryCache() types.BlobInfoCache {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed. // May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
mem.mutex.Lock() mem.mutex.Lock()
defer mem.mutex.Unlock() defer mem.mutex.Unlock()
return mem.uncompressedDigest(anyDigest) return mem.uncompressedDigestLocked(anyDigest)
} }
// uncompressedDigest returns an uncompressed digest corresponding to anyDigest. // uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
// May return anyDigest if it is known to be uncompressed. func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
if d, ok := mem.uncompressedDigests[anyDigest]; ok { if d, ok := mem.uncompressedDigests[anyDigest]; ok {
return d return d
} }
@ -67,7 +71,7 @@ func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; dont record a digest pair just because some remote author claims so (e.g. // WARNING: Only call this for LOCALLY VERIFIED data; dont record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock() mem.mutex.Lock()
defer mem.mutex.Unlock() defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
@ -85,7 +89,7 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data. // and can be reused given the opaque location data.
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
mem.mutex.Lock() mem.mutex.Lock()
defer mem.mutex.Unlock() defer mem.mutex.Unlock()
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
@ -97,16 +101,16 @@ func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scop
locationScope[location] = time.Now() // Possibly overwriting an older entry. locationScope[location] = time.Now() // Possibly overwriting an older entry.
} }
// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. // appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime { func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations { for l, t := range locations {
candidates = append(candidates, candidateWithTime{ candidates = append(candidates, prioritize.CandidateWithTime{
candidate: types.BICReplacementCandidate{ Candidate: types.BICReplacementCandidate{
Digest: digest, Digest: digest,
Location: l, Location: l,
}, },
lastSeen: t, LastSeen: t,
}) })
} }
return candidates return candidates
@ -118,14 +122,14 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest. // uncompressed digest.
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
mem.mutex.Lock() mem.mutex.Lock()
defer mem.mutex.Unlock() defer mem.mutex.Unlock()
res := []candidateWithTime{} res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
var uncompressedDigest digest.Digest // = "" var uncompressedDigest digest.Digest // = ""
if canSubstitute { if canSubstitute {
if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests { for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest { if d != primaryDigest && d != uncompressedDigest {
@ -137,5 +141,5 @@ func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope
} }
} }
} }
return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
} }

View File

@ -1,4 +1,5 @@
package blobinfocache // Package none implements a dummy BlobInfoCache which records no data.
package none
import ( import (
"github.com/containers/image/types" "github.com/containers/image/types"
@ -11,9 +12,10 @@ type noCache struct {
// NoCache implements BlobInfoCache by not recording any data. // NoCache implements BlobInfoCache by not recording any data.
// //
// This exists primarily for implementations of configGetter for Manifest.Inspect, // This exists primarily for implementations of configGetter for
// because configs only have one representation. // Manifest.Inspect, because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache. // Any use of BlobInfoCache with blobs should usually use at least a
// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{} var NoCache types.BlobInfoCache = noCache{}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.

View File

@ -30,7 +30,7 @@ import (
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' // -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath var systemDefaultPolicyPath = builtinDefaultPolicyPath
// builtinDefaultPolicyPath is the policy pat used for DefaultPolicy(). // builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above. // DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json" const builtinDefaultPolicyPath = "/etc/containers/policy.json"

View File

@ -18,7 +18,7 @@ import (
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir" "github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
@ -595,12 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID { if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it. // or to even check if we had it.
// Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seeen by the store had better call // that relies on using a blob digest that has never been seeen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so theres only // TryReusingBlob; not calling PutBlob already violates the documented API, so theres only
// so far we are going to accommodate that (if we should be doing that at all). // so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest) logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false) has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil { if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
} }
@ -732,7 +732,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if err != nil { if err != nil {
return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
} }
if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
} }
@ -765,14 +765,14 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if err != nil { if err != nil {
return errors.Wrapf(err, "error computing manifest digest") return errors.Wrapf(err, "error computing manifest digest")
} }
if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil { if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
} }
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return err return err
} }
if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
} }
@ -781,7 +781,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
} }
// Save the signatures, if we have any. // Save the signatures, if we have any.
if len(s.signatures) > 0 { if len(s.signatures) > 0 {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
} }

View File

@ -4,6 +4,7 @@ package storage
import ( import (
"fmt" "fmt"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -180,7 +181,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one // Return the transport's previously-set store. If we don't have one
// of those, initialize one now. // of those, initialize one now.
if s.store == nil { if s.store == nil {
options := storage.DefaultStoreOptions options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid())
if err != nil {
return nil, err
}
options.UIDMap = s.defaultUIDMap options.UIDMap = s.defaultUIDMap
options.GIDMap = s.defaultGIDMap options.GIDMap = s.defaultGIDMap
store, err := storage.GetStore(options) store, err := storage.GetStore(options)

View File

@ -1,7 +1,7 @@
github.com/containers/image github.com/containers/image
github.com/sirupsen/logrus v1.0.0 github.com/sirupsen/logrus v1.0.0
github.com/containers/storage master github.com/containers/storage v1.12.1
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716

View File

@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner // VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1 VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch = 5 VersionPatch = 6
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev = "" VersionDev = "-dev"
) )
// Version is the specification version that the package types support. // Version is the specification version that the package types support.

View File

@ -71,7 +71,7 @@ type Container struct {
type ContainerStore interface { type ContainerStore interface {
FileBasedStore FileBasedStore
MetadataStore MetadataStore
BigDataStore ContainerBigDataStore
FlaggableStore FlaggableStore
// Create creates a container that has a specified ID (or generates a // Create creates a container that has a specified ID (or generates a
@ -456,7 +456,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
return size, nil return size, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil { if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil { if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id) c, ok := r.lookup(id)
if !ok { if !ok {
return -1, ErrContainerUnknown return -1, ErrContainerUnknown
@ -464,6 +464,8 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if size, ok := c.BigDataSizes[key]; ok { if size, ok := c.BigDataSizes[key]; ok {
return size, nil return size, nil
} }
} else {
return -1, err
} }
} }
return -1, ErrSizeUnknown return -1, ErrSizeUnknown
@ -484,7 +486,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
return d, nil return d, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil { if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil { if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id) c, ok := r.lookup(id)
if !ok { if !ok {
return "", ErrContainerUnknown return "", ErrContainerUnknown
@ -492,6 +494,8 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if d, ok := c.BigDataDigests[key]; ok { if d, ok := c.BigDataDigests[key]; ok {
return d, nil return d, nil
} }
} else {
return "", err
} }
} }
return "", ErrDigestUnknown return "", ErrDigestUnknown

View File

@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT. // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go // source: ./containers.go
package storage package storage

View File

@ -19,6 +19,7 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system" rsystem "github.com/opencontainers/runc/libcontainer/system"
@ -212,7 +213,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
return nil return nil
} }
if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err return err
} }

View File

@ -8,7 +8,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex" "github.com/containers/storage/pkg/truncindex"
@ -117,7 +116,7 @@ type ImageStore interface {
ROImageStore ROImageStore
RWFileBasedStore RWFileBasedStore
RWMetadataStore RWMetadataStore
RWBigDataStore RWImageBigDataStore
FlaggableStore FlaggableStore
// Create creates an image that has a specified ID (or a random one) and // Create creates an image that has a specified ID (or a random one) and
@ -272,7 +271,7 @@ func (r *imageStore) Load() error {
} }
} }
} }
if shouldSave && !r.IsReadWrite() { if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateImageNames return ErrDuplicateImageNames
} }
r.images = images r.images = images
@ -291,7 +290,7 @@ func (r *imageStore) Save() error {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
} }
if !r.Locked() { if !r.Locked() {
return errors.New("image store is not locked") return errors.New("image store is not locked for writing")
} }
rpath := r.imagespath() rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@ -595,15 +594,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
return size, nil return size, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil { if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil { return int64(len(data)), nil
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
}
} }
return -1, ErrSizeUnknown return -1, ErrSizeUnknown
} }
@ -622,17 +613,6 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if d, ok := image.BigDataDigests[key]; ok { if d, ok := image.BigDataDigests[key]; ok {
return d, nil return d, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown return "", ErrDigestUnknown
} }
@ -655,7 +635,7 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
return modified return modified
} }
func (r *imageStore) SetBigData(id, key string, data []byte) error { func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" { if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
} }
@ -672,7 +652,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
} }
var newDigest digest.Digest var newDigest digest.Digest
if bigDataNameIsManifest(key) { if bigDataNameIsManifest(key) {
if newDigest, err = manifest.Digest(data); err != nil { if digestManifest == nil {
return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
}
if newDigest, err = digestManifest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest") return errors.Wrapf(err, "error digesting manifest")
} }
} else { } else {

View File

@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT. // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: images.go // source: ./images.go
package storage package storage

View File

@ -229,6 +229,7 @@ type LayerStore interface {
type layerStore struct { type layerStore struct {
lockfile Locker lockfile Locker
mountsLockfile Locker
rundir string rundir string
driver drivers.Driver driver drivers.Driver
layerdir string layerdir string
@ -291,7 +292,6 @@ func (r *layerStore) Load() error {
idlist := []string{} idlist := []string{}
ids := make(map[string]*Layer) ids := make(map[string]*Layer)
names := make(map[string]*Layer) names := make(map[string]*Layer)
mounts := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string) compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string)
if r.lockfile.IsReadWrite() { if r.lockfile.IsReadWrite() {
@ -319,39 +319,29 @@ func (r *layerStore) Load() error {
label.ReserveLabel(layer.MountLabel) label.ReserveLabel(layer.MountLabel)
} }
} }
err = nil
} }
if shouldSave && !r.IsReadWrite() { if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateLayerNames return ErrDuplicateLayerNames
} }
mpath := r.mountspath()
data, err = ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := ids[mount.ID]; ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
}
r.layers = layers r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist) r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids r.byid = ids
r.byname = names r.byname = names
r.bymount = mounts
r.bycompressedsum = compressedsums r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums r.byuncompressedsum = uncompressedsums
err = nil // Load and merge information about which layers are mounted, and where.
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err = r.loadMounts(); err != nil {
return err
}
}
// Last step: if we're writable, try to remove anything that a previous // Last step: if we're writable, try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to // user of this storage area marked for deletion but didn't manage to
// actually delete. // actually delete.
if r.IsReadWrite() { if r.IsReadWrite() && r.Locked() {
for _, layer := range r.layers { for _, layer := range r.layers {
if layer.Flags == nil { if layer.Flags == nil {
layer.Flags = make(map[string]interface{}) layer.Flags = make(map[string]interface{})
@ -373,12 +363,36 @@ func (r *layerStore) Load() error {
return err return err
} }
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
data, err := ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := r.lookup(mount.ID); ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
err = nil
}
r.bymount = mounts
return err
}
func (r *layerStore) Save() error { func (r *layerStore) Save() error {
if !r.IsReadWrite() { if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
} }
if !r.Locked() { if !r.Locked() {
return errors.New("layer store is not locked") return errors.New("layer store is not locked for writing")
} }
rpath := r.layerspath() rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@ -388,6 +402,25 @@ func (r *layerStore) Save() error {
if err != nil { if err != nil {
return err return err
} }
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err
}
if !r.IsReadWrite() {
return nil
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
defer r.mountsLockfile.Touch()
return r.saveMounts()
}
func (r *layerStore) saveMounts() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.mountsLockfile.Locked() {
return errors.New("layer store mount information is not locked for writing")
}
mpath := r.mountspath() mpath := r.mountspath()
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err return err
@ -406,11 +439,10 @@ func (r *layerStore) Save() error {
if err != nil { if err != nil {
return err return err
} }
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
return err return err
} }
defer r.Touch() return r.loadMounts()
return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
} }
func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) { func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) {
@ -426,16 +458,21 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap
} }
lockfile.Lock() lockfile.Lock()
defer lockfile.Unlock() defer lockfile.Unlock()
mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
}
rlstore := layerStore{ rlstore := layerStore{
lockfile: lockfile, lockfile: lockfile,
driver: driver, mountsLockfile: mountsLockfile,
rundir: rundir, driver: driver,
layerdir: layerdir, rundir: rundir,
byid: make(map[string]*Layer), layerdir: layerdir,
bymount: make(map[string]*Layer), byid: make(map[string]*Layer),
byname: make(map[string]*Layer), bymount: make(map[string]*Layer),
uidMap: copyIDMap(uidMap), byname: make(map[string]*Layer),
gidMap: copyIDMap(gidMap), uidMap: copyIDMap(uidMap),
gidMap: copyIDMap(gidMap),
} }
if err := rlstore.Load(); err != nil { if err := rlstore.Load(); err != nil {
return nil, err return nil, err
@ -451,13 +488,14 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
lockfile.Lock() lockfile.Lock()
defer lockfile.Unlock() defer lockfile.Unlock()
rlstore := layerStore{ rlstore := layerStore{
lockfile: lockfile, lockfile: lockfile,
driver: driver, mountsLockfile: nil,
rundir: rundir, driver: driver,
layerdir: layerdir, rundir: rundir,
byid: make(map[string]*Layer), layerdir: layerdir,
bymount: make(map[string]*Layer), byid: make(map[string]*Layer),
byname: make(map[string]*Layer), bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
} }
if err := rlstore.Load(); err != nil { if err := rlstore.Load(); err != nil {
return nil, err return nil, err
@ -673,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
} }
func (r *layerStore) Mounted(id string) (int, error) { func (r *layerStore) Mounted(id string) (int, error) {
if !r.IsReadWrite() {
return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return 0, err
}
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return 0, ErrLayerUnknown return 0, ErrLayerUnknown
@ -684,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
if !r.IsReadWrite() { if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
} }
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return "", err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return "", ErrLayerUnknown return "", ErrLayerUnknown
} }
if layer.MountCount > 0 { if layer.MountCount > 0 {
layer.MountCount++ layer.MountCount++
return layer.MountPoint, r.Save() return layer.MountPoint, r.saveMounts()
} }
if options.MountLabel == "" { if options.MountLabel == "" {
options.MountLabel = layer.MountLabel options.MountLabel = layer.MountLabel
@ -709,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
layer.MountPoint = filepath.Clean(mountpoint) layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++ layer.MountCount++
r.bymount[layer.MountPoint] = layer r.bymount[layer.MountPoint] = layer
err = r.Save() err = r.saveMounts()
} }
return mountpoint, err return mountpoint, err
} }
@ -718,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() { if !r.IsReadWrite() {
return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
} }
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return false, err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)] layerByMount, ok := r.bymount[filepath.Clean(id)]
@ -731,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
} }
if layer.MountCount > 1 { if layer.MountCount > 1 {
layer.MountCount-- layer.MountCount--
return true, r.Save() return true, r.saveMounts()
} }
err := r.driver.Put(id) err := r.driver.Put(id)
if err == nil || os.IsNotExist(err) { if err == nil || os.IsNotExist(err) {
@ -740,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
} }
layer.MountCount-- layer.MountCount--
layer.MountPoint = "" layer.MountPoint = ""
return false, r.Save() return false, r.saveMounts()
} }
return true, err return true, err
} }
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.IsReadWrite() {
return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return nil, nil, err
}
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return nil, nil, ErrLayerUnknown return nil, nil, ErrLayerUnknown
@ -862,14 +936,23 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown return ErrLayerUnknown
} }
id = layer.ID id = layer.ID
// This check is needed for idempotency of delete where the layer could have been // The layer may already have been explicitly unmounted, but if not, we
// already unmounted (since c/storage gives you that API directly) // should try to clean that up before we start deleting anything at the
for layer.MountCount > 0 { // driver level.
mountCount, err := r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil { if _, err := r.Unmount(id, false); err != nil {
return err return err
} }
mountCount, err = r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
} }
err := r.driver.Remove(id) err = r.driver.Remove(id)
if err == nil { if err == nil {
os.Remove(r.tspath(id)) os.Remove(r.tspath(id))
delete(r.byid, id) delete(r.byid, id)
@ -1235,7 +1318,20 @@ func (r *layerStore) Touch() error {
} }
func (r *layerStore) Modified() (bool, error) { func (r *layerStore) Modified() (bool, error) {
return r.lockfile.Modified() var mmodified bool
lmodified, err := r.lockfile.Modified()
if err != nil {
return lmodified, err
}
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
mmodified, err = r.mountsLockfile.Modified()
if err != nil {
return lmodified, err
}
}
return lmodified || mmodified, nil
} }
func (r *layerStore) IsReadWrite() bool { func (r *layerStore) IsReadWrite() bool {

View File

@ -35,7 +35,7 @@ type Locker interface {
// IsReadWrite() checks if the lock file is read-write // IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool IsReadWrite() bool
// Locked() checks if lock is locked // Locked() checks if lock is locked for writing by a thread in this process
Locked() bool Locked() bool
} }
@ -66,7 +66,10 @@ func getLockfile(path string, ro bool) (Locker, error) {
if lockfiles == nil { if lockfiles == nil {
lockfiles = make(map[string]Locker) lockfiles = make(map[string]Locker)
} }
cleanPath := filepath.Clean(path) cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
}
if locker, ok := lockfiles[cleanPath]; ok { if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() { if ro && locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath) return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)

View File

@ -32,7 +32,7 @@ func getLockFile(path string, ro bool) (Locker, error) {
} }
return &lockfile{ return &lockfile{
stateMutex: &sync.Mutex{}, stateMutex: &sync.Mutex{},
writeMutex: &sync.Mutex{}, rwMutex: &sync.RWMutex{},
file: path, file: path,
fd: uintptr(fd), fd: uintptr(fd),
lw: stringid.GenerateRandomID(), lw: stringid.GenerateRandomID(),
@ -42,10 +42,10 @@ func getLockFile(path string, ro bool) (Locker, error) {
} }
type lockfile struct { type lockfile struct {
// stateMutex is used to synchronize concurrent accesses // rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex stateMutex *sync.Mutex
// writeMutex is used to serialize and avoid recursive writer locks
writeMutex *sync.Mutex
counter int64 counter int64
file string file string
fd uintptr fd uintptr
@ -65,23 +65,24 @@ func (l *lockfile) lock(l_type int16) {
Len: 0, Len: 0,
Pid: int32(os.Getpid()), Pid: int32(os.Getpid()),
} }
if l_type == unix.F_WRLCK { switch l_type {
// If we try to lock as a writer, lock the writerMutex first to case unix.F_RDLCK:
// avoid multiple writer acquisitions of the same process. l.rwMutex.RLock()
// Note: it's important to lock it prior to the stateMutex to case unix.F_WRLCK:
// avoid a deadlock. l.rwMutex.Lock()
l.writeMutex.Lock() default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
} }
l.stateMutex.Lock() l.stateMutex.Lock()
l.locktype = l_type
if l.counter == 0 { if l.counter == 0 {
// Optimization: only use the (expensive) fcntl syscall when // Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. If it's greater than that, we're owning // the counter is 0. In this case, we're either the first
// the lock already and can only be a reader. // reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
} }
} }
l.locktype = l_type
l.locked = true l.locked = true
l.counter++ l.counter++
l.stateMutex.Unlock() l.stateMutex.Unlock()
@ -133,19 +134,28 @@ func (l *lockfile) Unlock() {
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
} }
} }
if l.locktype == unix.F_WRLCK { if l.locktype == unix.F_RDLCK {
l.writeMutex.Unlock() l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
} }
l.stateMutex.Unlock() l.stateMutex.Unlock()
} }
// Locked checks if lockfile is locked. // Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool { func (l *lockfile) Locked() bool {
return l.locked l.stateMutex.Lock()
defer l.stateMutex.Unlock()
return l.locked && (l.locktype == unix.F_WRLCK)
} }
// Touch updates the lock file with the UID of the user. // Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error { func (l *lockfile) Touch() error {
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
l.stateMutex.Unlock()
l.lw = stringid.GenerateRandomID() l.lw = stringid.GenerateRandomID()
id := []byte(l.lw) id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@ -170,6 +180,11 @@ func (l *lockfile) Touch() error {
// was loaded. // was loaded.
func (l *lockfile) Modified() (bool, error) { func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw) id := []byte(l.lw)
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
l.stateMutex.Unlock()
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil { if err != nil {
return true, err return true, err
@ -179,7 +194,7 @@ func (l *lockfile) Modified() (bool, error) {
return true, err return true, err
} }
if n != len(id) { if n != len(id) {
return true, unix.ENOSPC return true, nil
} }
lw := l.lw lw := l.lw
l.lw = string(id) l.lw = string(id)

View File

@ -636,7 +636,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if chownOpts == nil { if chownOpts == nil {
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
} }
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil {
return err return err
} }
} }

View File

@ -7,6 +7,7 @@ import (
"strings" "strings"
"syscall" "syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -130,7 +131,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, err return false, err
} }
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err return false, err
} }

View File

@ -7,6 +7,9 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"syscall"
"github.com/pkg/errors"
) )
// IDMap contains a single entry for user namespace range remapping. An array // IDMap contains a single entry for user namespace range remapping. An array
@ -277,3 +280,18 @@ func parseSubidFile(path, username string) (ranges, error) {
} }
return rangeList, nil return rangeList, nil
} }
func checkChownErr(err error, name string, uid, gid int) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name)
}
return err
}
func SafeChown(name string, uid, gid int) error {
return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
}
func SafeLchown(name string, uid, gid int) error {
return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
}

View File

@ -30,7 +30,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = []string{path} paths = []string{path}
} else if err == nil && chownExisting { } else if err == nil && chownExisting {
// short-circuit--we were called with an existing directory and chown was requested // short-circuit--we were called with an existing directory and chown was requested
return os.Chown(path, ownerUID, ownerGID) return SafeChown(path, ownerUID, ownerGID)
} else if err == nil { } else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested // nothing to do; directory path fully exists already and chown was NOT requested
return nil return nil
@ -60,7 +60,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// even if it existed, we will chown the requested path + any subpaths that // even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll // didn't exist when we called MkdirAll
for _, pathComponent := range paths { for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
return err return err
} }
} }

View File

@ -3,6 +3,7 @@
package reexec package reexec
import ( import (
"context"
"os/exec" "os/exec"
"syscall" "syscall"
@ -20,11 +21,23 @@ func Self() string {
// This will use the in-memory version (/proc/self/exe) of the current binary, // This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]). // it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd { func Command(args ...string) *exec.Cmd {
return &exec.Cmd{ cmd := exec.Command(Self())
Path: Self(), cmd.Args = args
Args: args, cmd.SysProcAttr = &syscall.SysProcAttr{
SysProcAttr: &syscall.SysProcAttr{ Pdeathsig: unix.SIGTERM,
Pdeathsig: unix.SIGTERM,
},
} }
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary, and also
// sets SysProcAttr.Pdeathsig to SIGTERM.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: unix.SIGTERM,
}
return cmd
} }

View File

@ -3,6 +3,7 @@
package reexec package reexec
import ( import (
"context"
"os/exec" "os/exec"
) )
@ -16,8 +17,14 @@ func Self() string {
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker". // be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd { func Command(args ...string) *exec.Cmd {
return &exec.Cmd{ cmd := exec.Command(Self())
Path: Self(), cmd.Args = args
Args: args, return cmd
} }
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
} }

View File

@ -3,6 +3,7 @@
package reexec package reexec
import ( import (
"context"
"os/exec" "os/exec"
) )
@ -10,3 +11,8 @@ import (
func Command(args ...string) *exec.Cmd { func Command(args ...string) *exec.Cmd {
return nil return nil
} }
// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
return nil
}

View File

@ -3,6 +3,7 @@
package reexec package reexec
import ( import (
"context"
"os/exec" "os/exec"
) )
@ -16,8 +17,16 @@ func Self() string {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will // For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe". // be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd { func Command(args ...string) *exec.Cmd {
return &exec.Cmd{ cmd := exec.Command(Self())
Path: Self(), cmd.Args = args
Args: args, return cmd
} }
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
} }

View File

@ -32,7 +32,7 @@ import (
var ( var (
// DefaultStoreOptions is a reasonable default set of options. // DefaultStoreOptions is a reasonable default set of options.
DefaultStoreOptions StoreOptions defaultStoreOptions StoreOptions
stores []*store stores []*store
storesLock sync.Mutex storesLock sync.Mutex
) )
@ -102,19 +102,21 @@ type ROBigDataStore interface {
BigDataNames(id string) ([]string, error) BigDataNames(id string) ([]string, error)
} }
// A RWBigDataStore wraps up the read-write big-data related methods of the // A RWImageBigDataStore wraps up how we store big-data associated with images.
// various types of file-based lookaside stores that we implement. type RWImageBigDataStore interface {
type RWBigDataStore interface { // SetBigData stores a (potentially large) piece of data associated
// SetBigData stores a (potentially large) piece of data associated with this // with this ID.
// ID. // Pass github.com/containers/image/manifest.Digest as digestManifest
SetBigData(id, key string, data []byte) error // to allow ByDigest to find images by their correct digests.
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
} }
// A BigDataStore wraps up the most common big-data related methods of the // A ContainerBigDataStore wraps up how we store big-data associated with containers.
// various types of file-based lookaside stores that we implement. type ContainerBigDataStore interface {
type BigDataStore interface {
ROBigDataStore ROBigDataStore
RWBigDataStore // SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
} }
// A FlaggableStore can have flags set and cleared on items which it manages. // A FlaggableStore can have flags set and cleared on items which it manages.
@ -352,9 +354,11 @@ type Store interface {
// of named data associated with an image. // of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error) ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data associated // SetImageBigData stores a (possibly large) chunk of named data
// with an image. // associated with an image. Pass
SetImageBigData(id, key string, data []byte) error // github.com/containers/image/manifest.Digest as digestManifest to
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageSize computes the size of the image's layers and ancillary data. // ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error) ImageSize(id string) (int64, error)
@ -546,14 +550,22 @@ type store struct {
// } // }
func GetStore(options StoreOptions) (Store, error) { func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = DefaultStoreOptions options = defaultStoreOptions
} }
if options.GraphRoot != "" { if options.GraphRoot != "" {
options.GraphRoot = filepath.Clean(options.GraphRoot) dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
}
options.GraphRoot = dir
} }
if options.RunRoot != "" { if options.RunRoot != "" {
options.RunRoot = filepath.Clean(options.RunRoot) dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
}
options.RunRoot = dir
} }
storesLock.Lock() storesLock.Lock()
@ -1321,7 +1333,7 @@ func (s *store) Metadata(id string) (string, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1343,7 +1355,7 @@ func (s *store) Metadata(id string) (string, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1359,7 +1371,7 @@ func (s *store) Metadata(id string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
cstore.Lock() cstore.RLock()
defer cstore.Unlock() defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil { if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil { if err = cstore.Load(); err != nil {
@ -1383,7 +1395,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1409,7 +1421,7 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1436,7 +1448,7 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
stores = append([]ROImageStore{ristore}, stores...) stores = append([]ROImageStore{ristore}, stores...)
for _, r := range stores { for _, r := range stores {
ristore := r ristore := r
ristore.Lock() ristore.RLock()
defer ristore.Unlock() defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil { if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil { if err = ristore.Load(); err != nil {
@ -1477,7 +1489,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
return nil, ErrImageUnknown return nil, ErrImageUnknown
} }
func (s *store) SetImageBigData(id, key string, data []byte) error { func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
ristore, err := s.ImageStore() ristore, err := s.ImageStore()
if err != nil { if err != nil {
return err return err
@ -1491,7 +1503,7 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
} }
} }
return ristore.SetBigData(id, key, data) return ristore.SetBigData(id, key, data, digestManifest)
} }
func (s *store) ImageSize(id string) (int64, error) { func (s *store) ImageSize(id string) (int64, error) {
@ -1507,7 +1519,7 @@ func (s *store) ImageSize(id string) (int64, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1529,7 +1541,7 @@ func (s *store) ImageSize(id string) (int64, error) {
// Look for the image's record. // Look for the image's record.
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1617,7 +1629,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1641,7 +1653,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
if err != nil { if err != nil {
return -1, err return -1, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1705,7 +1717,7 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1721,7 +1733,7 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
if err != nil { if err != nil {
return -1, err return -1, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1736,7 +1748,7 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1751,7 +1763,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1787,7 +1799,7 @@ func (s *store) Exists(id string) bool {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1809,7 +1821,7 @@ func (s *store) Exists(id string) bool {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1825,7 +1837,7 @@ func (s *store) Exists(id string) bool {
if err != nil { if err != nil {
return false return false
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1912,7 +1924,7 @@ func (s *store) Names(id string) ([]string, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1934,7 +1946,7 @@ func (s *store) Names(id string) ([]string, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1950,7 +1962,7 @@ func (s *store) Names(id string) ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -1974,7 +1986,7 @@ func (s *store) Lookup(name string) (string, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -1996,7 +2008,7 @@ func (s *store) Lookup(name string) (string, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2012,7 +2024,7 @@ func (s *store) Lookup(name string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
cstore.Lock() cstore.RLock()
defer cstore.Unlock() defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil { if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil { if err = cstore.Load(); err != nil {
@ -2464,7 +2476,7 @@ func (s *store) Mounted(id string) (int, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
rlstore.Lock() rlstore.RLock()
defer rlstore.Unlock() defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil { if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil { if err = rlstore.Load(); err != nil {
@ -2507,7 +2519,7 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2532,7 +2544,7 @@ func (s *store) DiffSize(from, to string) (int64, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2612,7 +2624,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2659,7 +2671,7 @@ func (s *store) LayerSize(id string) (int64, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2678,7 +2690,7 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
rlstore.Lock() rlstore.RLock()
defer rlstore.Unlock() defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil { if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil { if err = rlstore.Load(); err != nil {
@ -2700,14 +2712,14 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
rlstore.Lock() rlstore.RLock()
defer rlstore.Unlock() defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil { if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil { if err = rlstore.Load(); err != nil {
return nil, nil, err return nil, nil, err
} }
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -2738,7 +2750,7 @@ func (s *store) Layers() ([]Layer, error) {
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2767,7 +2779,7 @@ func (s *store) Images() ([]Image, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2789,7 +2801,7 @@ func (s *store) Containers() ([]Container, error) {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -2811,7 +2823,7 @@ func (s *store) Layer(id string) (*Layer, error) {
} }
for _, s := range append([]ROLayerStore{lstore}, lstores...) { for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2837,7 +2849,7 @@ func (s *store) Image(id string) (*Image, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2870,7 +2882,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
} }
for _, s := range append([]ROImageStore{istore}, istores...) { for _, s := range append([]ROImageStore{istore}, istores...) {
store := s store := s
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2903,7 +2915,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
return nil, err return nil, err
} }
for _, store := range append([]ROImageStore{istore}, istores...) { for _, store := range append([]ROImageStore{istore}, istores...) {
store.Lock() store.RLock()
defer store.Unlock() defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil { if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil { if err = store.Load(); err != nil {
@ -2924,7 +2936,7 @@ func (s *store) Container(id string) (*Container, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -2940,7 +2952,7 @@ func (s *store) ContainerLayerID(id string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -2963,7 +2975,7 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -2988,7 +3000,7 @@ func (s *store) ContainerDirectory(id string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -3015,7 +3027,7 @@ func (s *store) ContainerRunDirectory(id string) (string, error) {
return "", err return "", err
} }
rcstore.Lock() rcstore.RLock()
defer rcstore.Unlock() defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil { if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil { if err = rcstore.Load(); err != nil {
@ -3205,8 +3217,20 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret return ret
} }
// DefaultConfigFile path to the system wide storage.conf file // defaultConfigFile path to the system wide storage.conf file
const DefaultConfigFile = "/etc/containers/storage.conf" const defaultConfigFile = "/etc/containers/storage.conf"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
home, err := homeDir()
if err != nil {
return "", errors.Wrapf(err, "cannot determine users homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
return defaultConfigFile, nil
}
// TOML-friendly explicit tables used for conversions. // TOML-friendly explicit tables used for conversions.
type tomlConfig struct { type tomlConfig struct {
@ -3346,19 +3370,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
} }
func init() { func init() {
DefaultStoreOptions.RunRoot = "/var/run/containers/storage" defaultStoreOptions.RunRoot = "/var/run/containers/storage"
DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
DefaultStoreOptions.GraphDriverName = "" defaultStoreOptions.GraphDriverName = ""
ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions) ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
} }
func GetDefaultMountOptions() ([]string, error) { func GetDefaultMountOptions() ([]string, error) {
mountOpts := []string{ mountOpts := []string{
".mountopt", ".mountopt",
fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName), fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName),
} }
for _, option := range DefaultStoreOptions.GraphDriverOptions { for _, option := range defaultStoreOptions.GraphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option) key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil { if err != nil {
return nil, err return nil, err

234
vendor/github.com/containers/storage/utils.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
package storage
import (
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
	options := IDMappingOptions{
		HostUIDMapping: true,
		HostGIDMapping: true,
	}
	// If only one of the /etc/sub[ug]id usernames was given, use it for both.
	if subGIDMap == "" && subUIDMap != "" {
		subGIDMap = subUIDMap
	}
	if subUIDMap == "" && subGIDMap != "" {
		subUIDMap = subGIDMap
	}
	// Likewise, mirror the explicit map slices when only one side was provided.
	if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
		GIDMapSlice = UIDMapSlice
	}
	if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
		UIDMapSlice = GIDMapSlice
	}
	// With no mappings at all and running unprivileged, map container root
	// onto the invoking user and group.
	if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
		UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
	}
	if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
		GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
	}

	if subUIDMap != "" && subGIDMap != "" {
		mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
		}
		options.UIDMap = mappings.UIDs()
		options.GIDMap = mappings.GIDs()
	}
	parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
	}
	parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
	if err != nil {
		// Fixed: report the GID slice on a GID parse failure (the original
		// interpolated UIDMapSlice here, producing a misleading error).
		return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", GIDMapSlice)
	}
	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
	// Any explicit mapping disables host-identity passthrough for that side.
	if len(options.UIDMap) > 0 {
		options.HostUIDMapping = false
	}
	if len(options.GIDMap) > 0 {
		options.HostGIDMapping = false
	}
	return &options, nil
}
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
	// Fixed: the original read XDG_RUNTIME_DIR but never returned it, so an
	// explicitly configured runtime directory was silently ignored in favor
	// of $TMPDIR/<uid>.  Honor it first.
	if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
		return runtimeDir, nil
	}
	// Fall back to the systemd-style per-user directory, but only when it is
	// owned by us with the expected private mode.
	tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
	st, err := system.Stat(tmpDir)
	if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 {
		return tmpDir, nil
	}
	// Next choice: a per-uid directory under the system temp dir.
	tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
	if err := os.MkdirAll(tmpDir, 0700); err != nil {
		logrus.Errorf("failed to create %s: %v", tmpDir, err)
	} else {
		return tmpDir, nil
	}
	// Last resort: a "rundir" under the (symlink-resolved) home directory.
	home, err := homeDir()
	if err != nil {
		return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
	}
	resolvedHome, err := filepath.EvalSymlinks(home)
	if err != nil {
		return "", errors.Wrapf(err, "cannot resolve %s", home)
	}
	return filepath.Join(resolvedHome, "rundir"), nil
}
// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUid int) (string, string, error) {
	// The run root always comes from the rootless runtime directory.
	rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)
	if err != nil {
		return "", "", err
	}
	// Prefer an explicit XDG_DATA_HOME for the data directory.
	if dataDir := os.Getenv("XDG_DATA_HOME"); dataDir != "" {
		return dataDir, rootlessRuntime, nil
	}
	home, err := homeDir()
	if err != nil {
		return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
	}
	// runc doesn't like symlinks in the rootfs path, and at least
	// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
	resolvedHome, err := filepath.EvalSymlinks(home)
	if err != nil {
		return "", "", errors.Wrapf(err, "cannot resolve %s", home)
	}
	return filepath.Join(resolvedHome, ".local", "share"), rootlessRuntime, nil
}
// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
	dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)
	if err != nil {
		return StoreOptions{}, err
	}
	opts := StoreOptions{
		RunRoot:   rootlessRuntime,
		GraphRoot: filepath.Join(dataDir, "containers", "storage"),
	}
	// Use the overlay driver when the fuse-overlayfs helper binary is on
	// PATH (kernel overlayfs is not usable unprivileged); otherwise fall
	// back to the slower but always-available vfs driver.
	if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
		opts.GraphDriverName = "overlay"
		opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
	} else {
		opts.GraphDriverName = "vfs"
	}
	return opts, nil
}
// tomlOptionsConfig mirrors the [storage.options] table of a storage.conf
// file; only the fields this package needs to round-trip are declared.
type tomlOptionsConfig struct {
	// MountProgram is the "mount_program" key (e.g. a fuse-overlayfs path).
	MountProgram string `toml:"mount_program"`
}
// getTomlStorage builds a tomlConfig from the given store options so it can
// be serialized back out as a storage.conf file.
func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
	config := new(tomlConfig)

	config.Storage.Driver = storeOptions.GraphDriverName
	config.Storage.RunRoot = storeOptions.RunRoot
	config.Storage.GraphRoot = storeOptions.GraphRoot
	for _, i := range storeOptions.GraphDriverOptions {
		// Fixed: split only on the first '=' so values containing '=' are
		// kept intact, and guard len(s) so an option with no value at all
		// no longer panics with an index-out-of-range on s[1].
		if s := strings.SplitN(i, "=", 2); len(s) == 2 && s[0] == "overlay.mount_program" {
			config.Storage.Options.MountProgram = s[1]
		}
	}

	return config
}
// DefaultStoreOptions returns the default storage ops for containers
func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
	var (
		defaultRootlessRunRoot   string
		defaultRootlessGraphRoot string
		err                      error
	)
	storageOpts := defaultStoreOptions
	if rootless {
		storageOpts, err = getRootlessStorageOpts(rootlessUid)
		if err != nil {
			return storageOpts, err
		}
	}

	storageConf, err := DefaultConfigFile(rootless)
	if err != nil {
		return storageOpts, err
	}
	if _, err = os.Stat(storageConf); err == nil {
		// A config file exists: remember the computed rootless defaults,
		// then let the file's contents replace the options wholesale.
		defaultRootlessRunRoot = storageOpts.RunRoot
		defaultRootlessGraphRoot = storageOpts.GraphRoot
		storageOpts = StoreOptions{}
		ReloadConfigurationFile(storageConf, &storageOpts)
	}
	// Fixed: the original `if !os.IsNotExist(err)` also fired when err was
	// nil (os.IsNotExist(nil) is false) and returned early — with a nil
	// error, since errors.Wrapf(nil, ...) is nil — skipping the rootless
	// fallback logic below.  Only a genuine stat failure other than
	// "does not exist" is fatal here.
	if err != nil && !os.IsNotExist(err) {
		return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
	}

	if rootless {
		if err == nil {
			// If the file did not specify a graphroot or runroot,
			// set sane defaults so we don't try and use root-owned
			// directories
			if storageOpts.RunRoot == "" {
				storageOpts.RunRoot = defaultRootlessRunRoot
			}
			if storageOpts.GraphRoot == "" {
				storageOpts.GraphRoot = defaultRootlessGraphRoot
			}
		} else {
			// No per-user config file yet: write one recording the
			// computed rootless defaults for subsequent runs.
			if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
				return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
			}
			file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
			if err != nil {
				return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf)
			}
			tomlConfiguration := getTomlStorage(&storageOpts)
			defer file.Close()
			enc := toml.NewEncoder(file)
			if err := enc.Encode(tomlConfiguration); err != nil {
				// Don't leave a half-written config behind.
				os.Remove(storageConf)
				return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf)
			}
		}
	}
	return storageOpts, nil
}
// homeDir returns the current user's home directory, preferring $HOME and
// falling back to a user-database lookup when it is unset.
func homeDir() (string, error) {
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	usr, err := user.Current()
	if err != nil {
		return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
	}
	return usr.HomeDir, nil
}

View File

@ -1,18 +1,15 @@
github.com/BurntSushi/toml master github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust master
github.com/klauspost/compress v1.4.1 github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0 github.com/klauspost/cpuid v1.2.0
github.com/klauspost/pgzip v1.2.1 github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master github.com/opencontainers/go-digest master
github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux v1.1 github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master github.com/ostreedev/ostree-go master

View File

@ -1,6 +1,7 @@
package ocicni package ocicni
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net" "net"
@ -511,7 +512,7 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork,
netconf, cninet := network.NetworkConfig, network.CNIConfig netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
res, err := cninet.AddNetworkList(netconf, rt) res, err := cninet.AddNetworkList(context.Background(), netconf, rt)
if err != nil { if err != nil {
logrus.Errorf("Error adding network: %v", err) logrus.Errorf("Error adding network: %v", err)
return nil, err return nil, err
@ -529,7 +530,7 @@ func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNet
netconf, cninet := network.NetworkConfig, network.CNIConfig netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
err = cninet.DelNetworkList(netconf, rt) err = cninet.DelNetworkList(context.Background(), netconf, rt)
if err != nil { if err != nil {
logrus.Errorf("Error deleting network: %v", err) logrus.Errorf("Error deleting network: %v", err)
return err return err

13
vendor/github.com/cri-o/ocicni/vendor.conf generated vendored Normal file
View File

@ -0,0 +1,13 @@
github.com/containernetworking/cni fbb95fff8a5239a4295c991efa8a397d43118f7e
github.com/fsnotify/fsnotify 1485a34d5d5723fea214f5710708e19a831720e4
github.com/sirupsen/logrus 787e519fa85519b874dead61020de598e9a23944
github.com/onsi/ginkgo eea6ad008b96acdaa524f5b409513bf062b500ad
github.com/onsi/gomega 90e289841c1ed79b7a598a7cd9959750cb5e89e2
golang.org/x/net 63eda1eb0650888965ead1296efd04d0b2b61128
gopkg.in/yaml.v2 51d6538a90f86fe93ac480b35f37b2be17fef232
golang.org/x/text e3703dcdd614d2d7488fff034c75c551ea25da95
golang.org/x/sys f49334f85ddcf0f08d7fb6dd7363e9e6d6b777eb
github.com/hpcloud/tail a1dbeea552b7c8df4b542c66073e393de198a800
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/fsnotify/fsnotify.v1 c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e