vendor: bump c/common and other vendors
This commit mainly bumps c/common so the new netavark features can be synced with podman, along with several other vendor bumps.

[NO NEW TESTS NEEDED]
[NO TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>
vendor/github.com/containers/common/libnetwork/netavark/network.go | 11 (generated, vendored)
@@ -13,6 +13,7 @@ import (
 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -21,6 +22,12 @@ type netavarkNetwork struct {
 	// networkConfigDir is directory where the network config files are stored.
 	networkConfigDir string

+	// networkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config etc.
+	networkRunDir string
+
+	// tells netavark whether this is rootless mode or rootful, "true" or "false"
+	networkRootless bool
+
 	// netavarkBinary is the path to the netavark binary.
 	netavarkBinary string

@@ -53,7 +60,7 @@ type InitConfig struct {
 	// NetavarkBinary is the path to the netavark binary.
 	NetavarkBinary string

-	// NetworkRunDir is where temporary files are stored, i.e. the ipam db.
+	// NetworkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config
 	NetworkRunDir string

 	// DefaultNetwork is the name for the default network.
@@ -99,7 +106,9 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {

 	n := &netavarkNetwork{
 		networkConfigDir: conf.NetworkConfigDir,
+		networkRunDir:    conf.NetworkRunDir,
 		netavarkBinary:   conf.NetavarkBinary,
+		networkRootless:  unshare.IsRootless(),
 		ipamDBPath:       filepath.Join(conf.NetworkRunDir, "ipam.db"),
 		defaultNetwork:   defaultNetworkName,
 		defaultSubnet:    defaultNet,
vendor/github.com/containers/common/libnetwork/netavark/run.go | 5 (generated, vendored)
@@ -5,6 +5,7 @@ package netavark

 import (
 	"encoding/json"
 	"fmt"
+	"strconv"

 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
@@ -54,7 +55,7 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions
 	}

 	result := map[string]types.StatusBlock{}
-	err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result)
+	err = n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "setup", namespacePath}, netavarkOpts, &result)
 	if err != nil {
 		// let's dealloc ips to prevent leaking
 		if err := n.deallocIPs(&options.NetworkOptions); err != nil {
@@ -94,7 +95,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO
 		return errors.Wrap(err, "failed to convert net opts")
 	}

-	retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)
+	retErr := n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "teardown", namespacePath}, netavarkOpts, nil)

 	// when netavark returned an error we still free the used ips
 	// otherwise we could end up in a state where we block the ips forever
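For illustration, the two changed calls above amount to an invocation of this shape. Values are examples, and execNetavark's plumbing (network options passed to the process as JSON on stdin) is assumed from context, not shown in this hunk:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Illustrative values; podman fills these from InitConfig and
	// unshare.IsRootless().
	networkRunDir := "/run/containers/networks"
	rootless := false
	namespacePath := "/run/netns/netns-abc123"

	args := []string{
		"--config", networkRunDir, // global flags precede the subcommand
		"--rootless=" + strconv.FormatBool(rootless),
		"setup", namespacePath,
	}
	fmt.Println(append([]string{"netavark"}, args...))
}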
vendor/github.com/containers/common/pkg/config/config.go | 13 (generated, vendored)
@@ -72,6 +72,8 @@ type Config struct {
 	Network NetworkConfig `toml:"network"`
 	// Secret section defines configurations for the secret management
 	Secrets SecretConfig `toml:"secrets"`
+	// ConfigMaps section defines configurations for the configmap management
+	ConfigMaps ConfigMapConfig `toml:"configmaps"`
 }

 // ContainersConfig represents the "containers" TOML config table
@@ -514,6 +516,17 @@ type SecretConfig struct {
 	Opts map[string]string `toml:"opts,omitempty"`
 }

+// ConfigMapConfig represents the "configmap" TOML config table
+type ConfigMapConfig struct {
+	// Driver specifies the configmap driver to use.
+	// Current valid values:
+	//  * file
+	//  * pass
+	Driver string `toml:"driver,omitempty"`
+	// Opts contains driver specific options
+	Opts map[string]string `toml:"opts,omitempty"`
+}
+
 // MachineConfig represents the "machine" TOML config table
 type MachineConfig struct {
 	// Number of CPUs a machine is created with.
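For illustration, the new table as it would look in containers.conf, decoded here with BurntSushi/toml. The "root" option key is made up purely to show the shape of Opts; only "file" and "pass" drivers are named by the source:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Local mirror of ConfigMapConfig, just for this demo.
type configMapConfig struct {
	Driver string            `toml:"driver,omitempty"`
	Opts   map[string]string `toml:"opts,omitempty"`
}

func main() {
	var cfg struct {
		ConfigMaps configMapConfig `toml:"configmaps"`
	}
	doc := `[configmaps]
driver = "file"
[configmaps.opts]
root = "/run/configmaps"
`
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.ConfigMaps)
}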
vendor/github.com/containers/common/pkg/parse/parse.go | 20 (generated, vendored)
@@ -14,9 +14,27 @@ import (

 // ValidateVolumeOpts validates a volume's options
 func ValidateVolumeOpts(options []string) ([]string, error) {
-	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int
+	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
 	finalOpts := make([]string, 0, len(options))
 	for _, opt := range options {
+		// support advanced options like upperdir=/path, workdir=/path
+		if strings.Contains(opt, "upperdir") {
+			foundUpperDir++
+			if foundUpperDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+		if strings.Contains(opt, "workdir") {
+			foundWorkDir++
+			if foundWorkDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+
 		switch opt {
 		case "noexec", "exec":
 			foundExec++
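A minimal usage sketch of the extended validator (paths are examples): one upperdir and one workdir per overlay pass through, a second occurrence of either is rejected.

package main

import (
	"fmt"

	"github.com/containers/common/pkg/parse"
)

func main() {
	// Accepted: a single upperdir and a single workdir.
	opts, err := parse.ValidateVolumeOpts([]string{"ro", "upperdir=/tmp/upper", "workdir=/tmp/work"})
	fmt.Println(opts, err)

	// Rejected: duplicate upperdir.
	_, err = parse.ValidateVolumeOpts([]string{"upperdir=/a", "upperdir=/b"})
	fmt.Println(err) // invalid options ..., can only specify 1 upperdir per overlay
}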
vendor/github.com/containers/storage/VERSION | 2 (generated, vendored)
@@ -1 +1 @@
-1.37.0+dev
+1.38.0
vendor/github.com/containers/storage/go.mod | 10 (generated, vendored)
@@ -3,22 +3,22 @@ go 1.14
 module github.com/containers/storage

 require (
-	github.com/BurntSushi/toml v0.4.1
+	github.com/BurntSushi/toml v1.0.0
 	github.com/Microsoft/go-winio v0.5.1
-	github.com/Microsoft/hcsshim v0.9.1
+	github.com/Microsoft/hcsshim v0.9.2
 	github.com/containerd/stargz-snapshotter/estargz v0.10.1
 	github.com/cyphar/filepath-securejoin v0.2.3
 	github.com/docker/go-units v0.4.0
 	github.com/google/go-intervals v0.0.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.13.6
+	github.com/klauspost/compress v1.14.1
 	github.com/klauspost/pgzip v1.2.5
 	github.com/mattn/go-shellwords v1.0.12
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/sys/mountinfo v0.5.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/runc v1.0.3
+	github.com/opencontainers/runc v1.1.0
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/selinux v1.10.0
 	github.com/pkg/errors v0.9.1
@@ -29,6 +29,6 @@ require (
 	github.com/ulikunitz/xz v0.5.10
 	github.com/vbatts/tar-split v0.11.2
 	golang.org/x/net v0.0.0-20210825183410-e898025ed96a
-	golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
 	gotest.tools v2.2.0+incompatible
 )
vendor/github.com/containers/storage/go.sum | 25 (generated, vendored)
@@ -36,8 +36,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
-github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -98,6 +98,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -106,6 +107,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -131,6 +133,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -296,6 +299,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -420,8 +424,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -515,8 +520,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -578,6 +583,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -839,8 +845,11 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 630 (generated, vendored, new file)
@@ -0,0 +1,630 @@
package chunked

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unsafe"

	storage "github.com/containers/storage"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/ioutils"
	jsoniter "github.com/json-iterator/go"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	cacheKey     = "chunked-manifest-cache"
	cacheVersion = 1
)

type metadata struct {
	tagLen    int
	digestLen int
	tags      []byte
	vdata     []byte
}

type layer struct {
	id       string
	metadata *metadata
	target   string
}

type layersCache struct {
	layers  []layer
	refs    int
	store   storage.Store
	mutex   sync.RWMutex
	created time.Time
}

var cacheMutex sync.Mutex
var cache *layersCache

func (c *layersCache) release() {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()

	c.refs--
	if c.refs == 0 {
		cache = nil
	}
}

func getLayersCacheRef(store storage.Store) *layersCache {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
		cache.refs++
		return cache
	}
	cache = &layersCache{
		store:   store,
		refs:    1,
		created: time.Now(),
	}
	return cache
}

func getLayersCache(store storage.Store) (*layersCache, error) {
	c := getLayersCacheRef(store)

	if err := c.load(); err != nil {
		c.release()
		return nil, err
	}
	return c, nil
}

func (c *layersCache) load() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	allLayers, err := c.store.Layers()
	if err != nil {
		return err
	}
	existingLayers := make(map[string]string)
	for _, r := range c.layers {
		existingLayers[r.id] = r.target
	}

	currentLayers := make(map[string]string)
	for _, r := range allLayers {
		currentLayers[r.ID] = r.ID
		if _, found := existingLayers[r.ID]; found {
			continue
		}

		bigData, err := c.store.LayerBigData(r.ID, cacheKey)
		if err != nil {
			if errors.Cause(err) == os.ErrNotExist {
				continue
			}
			return err
		}
		defer bigData.Close()

		metadata, err := readMetadataFromCache(bigData)
		if err != nil {
			logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
		}

		if metadata != nil {
			c.addLayer(r.ID, metadata)
			continue
		}

		manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
		if err != nil {
			continue
		}
		defer manifestReader.Close()
		manifest, err := ioutil.ReadAll(manifestReader)
		if err != nil {
			return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
		}

		metadata, err = writeCache(manifest, r.ID, c.store)
		if err == nil {
			c.addLayer(r.ID, metadata)
		}
	}

	var newLayers []layer
	for _, l := range c.layers {
		if _, found := currentLayers[l.id]; found {
			newLayers = append(newLayers, l)
		}
	}
	c.layers = newLayers

	return nil
}

// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
// is usable for deduplication with hardlinks.
// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
	digester := digest.Canonical.Digester()

	modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
	hash := digester.Hash()

	if _, err := hash.Write([]byte(f.Digest)); err != nil {
		return "", err
	}

	if _, err := hash.Write([]byte(modeString)); err != nil {
		return "", err
	}

	if len(f.Xattrs) > 0 {
		keys := make([]string, 0, len(f.Xattrs))
		for k := range f.Xattrs {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for _, k := range keys {
			if _, err := hash.Write([]byte(k)); err != nil {
				return "", err
			}
			if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
				return "", err
			}
		}
	}
	return string(digester.Digest()), nil
}

// generateFileLocation generates a file location in the form $OFFSET@$PATH
func generateFileLocation(path string, offset uint64) []byte {
	return []byte(fmt.Sprintf("%d@%s", offset, path))
}

// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
// the [OFFSET; LEN] points to the variable length data where the file locations
// are stored.  $DIGEST has length digestLen stored in the metadata file header.
func generateTag(digest string, offset, len uint64) string {
	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
}

type setBigData interface {
	// SetLayerBigData stores a (possibly large) chunk of named data
	SetLayerBigData(id, key string, data io.Reader) error
}

// writeCache writes a cache for the layer ID.
// It generates a sorted list of digests with their offset to the path location and offset.
// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
// There are 3 kinds of digests stored:
// - digest(file.payload)
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
// - digest(i) for each i in chunks(file payload)
func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
	var vdata bytes.Buffer
	tagLen := 0
	digestLen := 0
	var tagsBuffer bytes.Buffer

	toc, err := prepareMetadata(manifest)
	if err != nil {
		return nil, err
	}

	var tags []string
	for _, k := range toc {
		if k.Digest != "" {
			location := generateFileLocation(k.Name, 0)

			off := uint64(vdata.Len())
			l := uint64(len(location))

			d := generateTag(k.Digest, off, l)
			if tagLen == 0 {
				tagLen = len(d)
			}
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			fp, err := calculateHardLinkFingerprint(k)
			if err != nil {
				return nil, err
			}
			d = generateTag(fp, off, l)
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			if _, err := vdata.Write(location); err != nil {
				return nil, err
			}

			digestLen = len(k.Digest)
		}
		if k.ChunkDigest != "" {
			location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
			off := uint64(vdata.Len())
			l := uint64(len(location))
			d := generateTag(k.ChunkDigest, off, l)
			if tagLen == 0 {
				tagLen = len(d)
			}
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			if _, err := vdata.Write(location); err != nil {
				return nil, err
			}
			digestLen = len(k.ChunkDigest)
		}
	}

	sort.Strings(tags)

	for _, t := range tags {
		if _, err := tagsBuffer.Write([]byte(t)); err != nil {
			return nil, err
		}
	}

	pipeReader, pipeWriter := io.Pipe()
	errChan := make(chan error, 1)
	go func() {
		defer pipeWriter.Close()
		defer close(errChan)

		// version
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
			errChan <- err
			return
		}

		// len of a tag
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
			errChan <- err
			return
		}

		// len of a digest
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
			errChan <- err
			return
		}

		// tags length
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
			errChan <- err
			return
		}

		// vdata length
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
			errChan <- err
			return
		}

		// tags
		if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
			errChan <- err
			return
		}

		// variable length data
		if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
			errChan <- err
			return
		}

		errChan <- nil
	}()
	defer pipeReader.Close()

	counter := ioutils.NewWriteCounter(ioutil.Discard)

	r := io.TeeReader(pipeReader, counter)

	if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
		return nil, err
	}

	if err := <-errChan; err != nil {
		return nil, err
	}

	logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)

	return &metadata{
		digestLen: digestLen,
		tagLen:    tagLen,
		tags:      tagsBuffer.Bytes(),
		vdata:     vdata.Bytes(),
	}, nil
}

func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
	var version, tagLen, digestLen, tagsLen, vdataLen uint64
	if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
		return nil, err
	}
	if version != cacheVersion {
		return nil, nil
	}
	if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
		return nil, err
	}

	tags := make([]byte, tagsLen)
	if _, err := bigData.Read(tags); err != nil {
		return nil, err
	}

	vdata := make([]byte, vdataLen)
	if _, err := bigData.Read(vdata); err != nil {
		return nil, err
	}

	return &metadata{
		tagLen:    int(tagLen),
		digestLen: int(digestLen),
		tags:      tags,
		vdata:     vdata,
	}, nil
}

func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
	toc, err := unmarshalToc(manifest)
	if err != nil {
		// ignore errors here.  They might be caused by a different manifest format.
		return nil, nil
	}

	var r []*internal.FileMetadata
	chunkSeen := make(map[string]bool)
	for i := range toc.Entries {
		d := toc.Entries[i].Digest
		if d != "" {
			r = append(r, &toc.Entries[i])
			continue
		}

		// chunks do not use hard link dedup so keeping just one candidate is enough
		cd := toc.Entries[i].ChunkDigest
		if cd != "" && !chunkSeen[cd] {
			r = append(r, &toc.Entries[i])
			chunkSeen[cd] = true
		}
	}
	return r, nil
}

func (c *layersCache) addLayer(id string, metadata *metadata) error {
	target, err := c.store.DifferTarget(id)
	if err != nil {
		return fmt.Errorf("get checkout directory layer %q: %w", id, err)
	}

	l := layer{
		id:       id,
		metadata: metadata,
		target:   target,
	}
	c.layers = append(c.layers, l)
	return nil
}

func byteSliceAsString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
	if len(digest) != metadata.digestLen {
		return "", 0, 0
	}

	nElements := len(metadata.tags) / metadata.tagLen

	i := sort.Search(nElements, func(i int) bool {
		d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
		return strings.Compare(d, digest) >= 0
	})
	if i < nElements {
		d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
		if digest == d {
			startOff := i*metadata.tagLen + metadata.digestLen
			parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
			off, _ := strconv.ParseInt(parts[0], 10, 64)
			len, _ := strconv.ParseInt(parts[1], 10, 64)
			return digest, uint64(off), uint64(len)
		}
	}
	return "", 0, 0
}

func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
	if digest == "" {
		return "", "", -1, nil
	}

	c.mutex.RLock()
	defer c.mutex.RUnlock()

	for _, layer := range c.layers {
		digest, off, len := findTag(digest, layer.metadata)
		if digest != "" {
			position := string(layer.metadata.vdata[off : off+len])
			parts := strings.SplitN(position, "@", 2)
			offFile, _ := strconv.ParseInt(parts[0], 10, 64)
			return layer.target, parts[1], offFile, nil
		}
	}

	return "", "", -1, nil
}

// findFileInOtherLayers finds the specified file in other layers.
// file is the file to look for.
func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
	digest := file.Digest
	if useHardLinks {
		var err error
		digest, err = calculateHardLinkFingerprint(file)
		if err != nil {
			return "", "", err
		}
	}
	target, name, off, err := c.findDigestInternal(digest)
	if off == 0 {
		return target, name, err
	}
	return "", "", nil
}

func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
	return c.findDigestInternal(chunk.ChunkDigest)
}

func unmarshalToc(manifest []byte) (*internal.TOC, error) {
	var buf bytes.Buffer
	count := 0
	var toc internal.TOC

	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field != "entries" {
			iter.Skip()
			continue
		}
		for iter.ReadArray() {
			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
				switch field {
				case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
					count += len(iter.ReadStringAsSlice())
				case "xattrs":
					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
						count += len(iter.ReadStringAsSlice())
					}
				default:
					iter.Skip()
				}
			}
		}
		break
	}

	buf.Grow(count)

	getString := func(b []byte) string {
		from := buf.Len()
		buf.Write(b)
		to := buf.Len()
		return byteSliceAsString(buf.Bytes()[from:to])
	}

	iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "version" {
			toc.Version = iter.ReadInt()
			continue
		}
		if field != "entries" {
			iter.Skip()
			continue
		}
		for iter.ReadArray() {
			var m internal.FileMetadata
			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
				switch field {
				case "type":
					m.Type = getString(iter.ReadStringAsSlice())
				case "name":
					m.Name = getString(iter.ReadStringAsSlice())
				case "linkName":
					m.Linkname = getString(iter.ReadStringAsSlice())
				case "mode":
					m.Mode = iter.ReadInt64()
				case "size":
					m.Size = iter.ReadInt64()
				case "UID":
					m.UID = iter.ReadInt()
				case "GID":
					m.GID = iter.ReadInt()
				case "ModTime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.ModTime = &time
				case "accesstime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.AccessTime = &time
				case "changetime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.ChangeTime = &time
				case "devMajor":
					m.Devmajor = iter.ReadInt64()
				case "devMinor":
					m.Devminor = iter.ReadInt64()
				case "digest":
					m.Digest = getString(iter.ReadStringAsSlice())
				case "offset":
					m.Offset = iter.ReadInt64()
				case "endOffset":
					m.EndOffset = iter.ReadInt64()
				case "chunkSize":
					m.ChunkSize = iter.ReadInt64()
				case "chunkOffset":
					m.ChunkOffset = iter.ReadInt64()
				case "chunkDigest":
					m.ChunkDigest = getString(iter.ReadStringAsSlice())
				case "chunkType":
					m.ChunkType = getString(iter.ReadStringAsSlice())
				case "xattrs":
					m.Xattrs = make(map[string]string)
					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
						value := iter.ReadStringAsSlice()
						m.Xattrs[key] = getString(value)
					}
				default:
					iter.Skip()
				}
			}
			toc.Entries = append(toc.Entries, m)
		}
		break
	}
	toc.StringsBuf = buf
	return &toc, nil
}
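A standalone illustration of the tag layout used above: each tag is the fixed-width string $DIGEST$OFFSET@$LEN, which is what lets findTag binary-search the concatenated tag area. The digest value below is made up.

package main

import "fmt"

// Same format string as generateTag above: offset and length are zero-padded
// to 20 digits so every tag has one width and sorts byte-wise by digest.
func generateTag(digest string, offset, length uint64) string {
	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, length)
}

func main() {
	// Points at 14 bytes of "$OFFSET@$PATH" location data at vdata offset 42.
	fmt.Println(generateTag("sha256:deadbeef", 42, 14))
	// sha256:deadbeef00000000000000000042@00000000000000000014
}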
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 310 (generated, vendored)
@@ -5,6 +5,7 @@ package compressor
 // larger software like the graph drivers.

 import (
+	"bufio"
 	"encoding/base64"
 	"io"
 	"io/ioutil"
@@ -15,6 +16,189 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 )

+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.THRESHOLD consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00').
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole.  Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF.  Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros).  If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
+	IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+	rc.IsLastChunkZeros = false
+
+	if rc.pendingHole > 0 {
+		toCopy := int64(len(b))
+		if rc.pendingHole < toCopy {
+			toCopy = rc.pendingHole
+		}
+		rc.pendingHole -= toCopy
+		for i := int64(0); i < toCopy; i++ {
+			b[i] = 0
+		}
+
+		rc.WrittenOut += toCopy
+
+		rc.IsLastChunkZeros = true
+
+		// if there are no other zeros left, terminate the chunk
+		return rc.pendingHole == 0, int(toCopy), nil
+	}
+
+	if rc.closed {
+		return false, 0, io.EOF
+	}
+
+	for i := 0; i < len(b); i++ {
+		holeLen, n, err := rc.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				rc.closed = true
+				if i == 0 {
+					return false, 0, err
+				}
+				return false, i, nil
+			}
+			// Report any other error type
+			return false, -1, err
+		}
+		if holeLen > 0 {
+			for j := int64(0); j < holeLen; j++ {
+				rc.rollsum.Roll(0)
+			}
+			rc.pendingHole = holeLen
+			return true, i, nil
+		}
+		b[i] = n
+		rc.WrittenOut++
+		rc.rollsum.Roll(n)
+		if rc.rollsum.OnSplitWithBits(RollsumBits) {
+			return true, i + 1, nil
+		}
+	}
+	return false, len(b), nil
+}
+
+type chunk struct {
+	ChunkOffset int64
+	Offset      int64
+	Checksum    string
+	ChunkSize   int64
+	ChunkType   string
+}
+
 func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
 	// total written so far.  Used to retrieve partial offsets in the file
 	dest := ioutils.NewWriteCounter(destFile)
@@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		if _, err := zstdWriter.Write(rawBytes); err != nil {
 			return err
 		}
 		payloadDigester := digest.Canonical.Digester()
-		payloadChecksum := payloadDigester.Hash()
-
-		payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+		chunkDigester := digest.Canonical.Digester()

 		// Now handle the payload, if any
-		var startOffset, endOffset int64
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
 		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
 		for {
-			read, errRead := tr.Read(buf)
+			mustSplit, read, errRead := rcReader.Read(buf)
 			if errRead != nil && errRead != io.EOF {
 				return err
 			}

-			// restart the compression only if there is
-			// a payload.
+			// restart the compression only if there is a payload.
 			if read > 0 {
 				if startOffset == 0 {
 					startOffset, err = restartCompression()
 					if err != nil {
 						return err
 					}
+					lastOffset = startOffset
 				}
-				_, err := payloadDest.Write(buf[:read])
-				if err != nil {
+
+				if _, err := payloadDest.Write(buf[:read]); err != nil {
 					return err
 				}
 			}
+			if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+				off, err := restartCompression()
+				if err != nil {
+					return err
+				}
+
+				chunkSize := rcReader.WrittenOut - lastChunkOffset
+				if chunkSize > 0 {
+					chunkType := internal.ChunkTypeData
+					if rcReader.IsLastChunkZeros {
+						chunkType = internal.ChunkTypeZeros
+					}
+
+					chunks = append(chunks, chunk{
+						ChunkOffset: lastChunkOffset,
+						Offset:      lastOffset,
+						Checksum:    chunkDigester.Digest().String(),
+						ChunkSize:   chunkSize,
+						ChunkType:   chunkType,
+					})
+				}
+
+				lastOffset = off
+				lastChunkOffset = rcReader.WrittenOut
+				chunkDigester = digest.Canonical.Digester()
+				payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+			}
 			if errRead == io.EOF {
 				if startOffset > 0 {
-					endOffset, err = restartCompression()
-					if err != nil {
-						return err
-					}
 					checksum = payloadDigester.Digest().String()
 				}
 				break
@@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		for k, v := range hdr.Xattrs {
 			xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
 		}
-		m := internal.FileMetadata{
-			Type:       typ,
-			Name:       hdr.Name,
-			Linkname:   hdr.Linkname,
-			Mode:       hdr.Mode,
-			Size:       hdr.Size,
-			UID:        hdr.Uid,
-			GID:        hdr.Gid,
-			ModTime:    hdr.ModTime,
-			AccessTime: hdr.AccessTime,
-			ChangeTime: hdr.ChangeTime,
-			Devmajor:   hdr.Devmajor,
-			Devminor:   hdr.Devminor,
-			Xattrs:     xattrs,
-			Digest:     checksum,
-			Offset:     startOffset,
-			EndOffset:  endOffset,
-
-			// ChunkSize is 0 for the last chunk
-			ChunkSize:   0,
-			ChunkOffset: 0,
-			ChunkDigest: checksum,
+		entries := []internal.FileMetadata{
+			{
+				Type:       typ,
+				Name:       hdr.Name,
+				Linkname:   hdr.Linkname,
+				Mode:       hdr.Mode,
+				Size:       hdr.Size,
+				UID:        hdr.Uid,
+				GID:        hdr.Gid,
+				ModTime:    &hdr.ModTime,
+				AccessTime: &hdr.AccessTime,
+				ChangeTime: &hdr.ChangeTime,
+				Devmajor:   hdr.Devmajor,
+				Devminor:   hdr.Devminor,
+				Xattrs:     xattrs,
+				Digest:     checksum,
+				Offset:     startOffset,
+				EndOffset:  lastOffset,
+			},
 		}
-		metadata = append(metadata, m)
+		for i := 1; i < len(chunks); i++ {
+			entries = append(entries, internal.FileMetadata{
+				Type:        internal.TypeChunk,
+				Name:        hdr.Name,
+				ChunkOffset: chunks[i].ChunkOffset,
+			})
+		}
+		if len(chunks) > 1 {
+			for i := range chunks {
+				entries[i].ChunkSize = chunks[i].ChunkSize
+				entries[i].Offset = chunks[i].Offset
+				entries[i].ChunkDigest = chunks[i].Checksum
+				entries[i].ChunkType = chunks[i].ChunkType
+			}
+		}
+		metadata = append(metadata, entries...)
 	}

 	rawBytes := tr.RawBytes()
@@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
 func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
 	if level == nil {
-		l := 3
+		l := 10
 		level = &l
 	}

vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go | 81 (generated, vendored, new file)
@@ -0,0 +1,81 @@
/*
Copyright 2011 The Perkeep Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package rollsum implements rolling checksums similar to apenwarr's bup, which
// is similar to librsync.
//
// The bup project is at https://github.com/apenwarr/bup and its splitting in
// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
package compressor

import (
	"math/bits"
)

const windowSize = 64 // Roll assumes windowSize is a power of 2
const charOffset = 31

const blobBits = 13
const blobSize = 1 << blobBits // 8k

type RollSum struct {
	s1, s2 uint32
	window [windowSize]uint8
	wofs   int
}

func NewRollSum() *RollSum {
	return &RollSum{
		s1: windowSize * charOffset,
		s2: windowSize * (windowSize - 1) * charOffset,
	}
}

func (rs *RollSum) add(drop, add uint32) {
	s1 := rs.s1 + add - drop
	rs.s1 = s1
	rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
}

// Roll adds ch to the rolling sum.
func (rs *RollSum) Roll(ch byte) {
	wp := &rs.window[rs.wofs]
	rs.add(uint32(*wp), uint32(ch))
	*wp = ch
	rs.wofs = (rs.wofs + 1) & (windowSize - 1)
}

// OnSplit reports whether at least 13 consecutive trailing bits of
// the current checksum are set the same way.
func (rs *RollSum) OnSplit() bool {
	return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
}

// OnSplitWithBits reports whether at least n consecutive trailing bits
// of the current checksum are set the same way.
func (rs *RollSum) OnSplitWithBits(n uint32) bool {
	mask := (uint32(1) << n) - 1
	return rs.s2&mask == (^uint32(0))&mask
}

func (rs *RollSum) Bits() int {
	rsum := rs.Digest() >> (blobBits + 1)
	return blobBits + bits.TrailingZeros32(^rsum)
}

func (rs *RollSum) Digest() uint32 {
	return (rs.s1 << 16) | (rs.s2 & 0xffff)
}
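A small usage sketch of the rolling checksum, assuming the vendored package is imported at its canonical path: boundaries fall wherever the low RollsumBits (16) bits of the checksum are all ones, so chunks average about 64 KiB regardless of where the stream starts. The input data is made up.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked/compressor"
)

func main() {
	rs := compressor.NewRollSum()
	data := make([]byte, 1<<20)
	for i := range data {
		data[i] = byte(i*7 + i/3) // arbitrary, non-constant input
	}

	last := 0
	for i, b := range data {
		rs.Roll(b)
		if rs.OnSplitWithBits(compressor.RollsumBits) {
			fmt.Printf("chunk %d..%d (%d bytes)\n", last, i+1, i+1-last)
			last = i + 1
		}
	}
}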
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 32 (generated, vendored)
@@ -8,11 +8,11 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/binary"
-	"encoding/json"
 	"fmt"
 	"io"
+	"time"

 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
 )
@@ -20,6 +20,9 @@ import (
 type TOC struct {
 	Version int            `json:"version"`
 	Entries []FileMetadata `json:"entries"`
+
+	// internal: used by unmarshalToc
+	StringsBuf bytes.Buffer `json:"-"`
 }

 type FileMetadata struct {
@@ -27,25 +30,33 @@ type FileMetadata struct {
 	Name     string `json:"name"`
 	Linkname string `json:"linkName,omitempty"`
 	Mode     int64  `json:"mode,omitempty"`
-	Size     int64  `json:"size"`
-	UID      int    `json:"uid"`
-	GID      int    `json:"gid"`
-	ModTime    time.Time `json:"modtime"`
-	AccessTime time.Time `json:"accesstime"`
-	ChangeTime time.Time `json:"changetime"`
-	Devmajor   int64     `json:"devMajor"`
-	Devminor   int64     `json:"devMinor"`
+	Size     int64  `json:"size,omitempty"`
+	UID      int    `json:"uid,omitempty"`
+	GID      int    `json:"gid,omitempty"`
+	ModTime    *time.Time `json:"modtime,omitempty"`
+	AccessTime *time.Time `json:"accesstime,omitempty"`
+	ChangeTime *time.Time `json:"changetime,omitempty"`
+	Devmajor   int64      `json:"devMajor,omitempty"`
+	Devminor   int64      `json:"devMinor,omitempty"`
 	Xattrs map[string]string `json:"xattrs,omitempty"`
 	Digest    string `json:"digest,omitempty"`
 	Offset    int64  `json:"offset,omitempty"`
 	EndOffset int64  `json:"endOffset,omitempty"`

-	// Currently chunking is not supported.
 	ChunkSize   int64  `json:"chunkSize,omitempty"`
 	ChunkOffset int64  `json:"chunkOffset,omitempty"`
 	ChunkDigest string `json:"chunkDigest,omitempty"`
+	ChunkType   string `json:"chunkType,omitempty"`
+
+	// internal: computed by mergeTOCEntries.
+	Chunks []*FileMetadata `json:"-"`
 }

+const (
+	ChunkTypeData  = ""
+	ChunkTypeZeros = "zeros"
+)
+
 const (
 	TypeReg   = "reg"
 	TypeChunk = "chunk"
@@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		Entries: metadata,
 	}

+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	// Generate the manifest
 	manifest, err := json.Marshal(toc)
 	if err != nil {
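Why the time fields become pointers: with a plain time.Time, omitempty never fires (the zero value is a non-empty struct), so every unset timestamp serialized as "0001-01-01T00:00:00Z". A self-contained demonstration using a local mirror struct, since the vendored internal package cannot be imported directly:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local mirror of a few FileMetadata fields, for illustration only.
type entry struct {
	Name    string     `json:"name"`
	Size    int64      `json:"size,omitempty"`
	ModTime *time.Time `json:"modtime,omitempty"`
}

func main() {
	// ModTime is nil, so it drops out of the JSON entirely.
	b, _ := json.Marshal(entry{Name: "etc/passwd", Size: 1024})
	fmt.Println(string(b)) // {"name":"etc/passwd","size":1024}
}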
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 995 (generated, vendored)
File diff suppressed because it is too large
vendor/github.com/containers/storage/pkg/idtools/idtools.go | 36 (generated, vendored)
@@ -82,7 +82,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	if len(uidMap) == 1 && uidMap[0].Size == 1 {
 		uid = uidMap[0].HostID
 	} else {
-		uid, err = toHost(0, uidMap)
+		uid, err = RawToHost(0, uidMap)
 		if err != nil {
 			return -1, -1, err
 		}
@@ -90,7 +90,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	if len(gidMap) == 1 && gidMap[0].Size == 1 {
 		gid = gidMap[0].HostID
 	} else {
-		gid, err = toHost(0, gidMap)
+		gid, err = RawToHost(0, gidMap)
 		if err != nil {
 			return -1, -1, err
 		}
@@ -98,10 +98,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	return uid, gid, nil
 }

-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return hostID, nil
 	}
@@ -114,10 +118,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
 	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
 }

-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return contID, nil
 	}
@@ -187,22 +195,22 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
 	var err error
 	var target IDPair

-	target.UID, err = toHost(pair.UID, i.uids)
+	target.UID, err = RawToHost(pair.UID, i.uids)
 	if err != nil {
 		return target, err
 	}

-	target.GID, err = toHost(pair.GID, i.gids)
+	target.GID, err = RawToHost(pair.GID, i.gids)
 	return target, err
 }

 // ToContainer returns the container UID and GID for the host uid and gid
 func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
-	uid, err := toContainer(pair.UID, i.uids)
+	uid, err := RawToContainer(pair.UID, i.uids)
 	if err != nil {
 		return -1, -1, err
 	}
-	gid, err := toContainer(pair.GID, i.gids)
+	gid, err := RawToContainer(pair.GID, i.gids)
 	return uid, gid, err
 }
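A usage sketch of the newly exported helpers. The IDMap literal models a typical rootless setup; its field names (ContainerID, HostID, Size) come from the idtools package, not from this hunk.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Container IDs 0..65535 backed by host IDs 100000..165535.
	mapping := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostID, err := idtools.RawToHost(0, mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(hostID) // 100000

	ctrID, err := idtools.RawToContainer(100001, mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(ctrID) // 1
}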
vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go | 6 (generated, vendored)
@@ -12,10 +12,14 @@ import (
 #cgo LDFLAGS: -l subid
 #include <shadow/subid.h>
 #include <stdlib.h>
+#include <stdio.h>
 const char *Prog = "storage";
+FILE *shadow_logfd = NULL;

 struct subid_range get_range(struct subid_range *ranges, int i)
 {
-	return ranges[i];
+	shadow_logfd = stderr;
+	return ranges[i];
 }

 #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
vendor/github.com/containers/storage/store.go | 44 (generated, vendored)
@@ -647,17 +647,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
 	storesLock.Lock()
 	defer storesLock.Unlock()

+	// return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first
 	for _, s := range stores {
-		if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+		if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
 			return s, nil
 		}
 	}

-	if options.GraphRoot == "" {
-		return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
-	}
-	if options.RunRoot == "" {
-		return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+	// if passed a run-root or graph-root alone, the other should be defaulted; only error if we have neither
+	switch {
+	case options.RunRoot == "" && options.GraphRoot == "":
+		return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified")
+	case options.GraphRoot == "":
+		options.GraphRoot = types.Options().GraphRoot
+	case options.RunRoot == "":
+		options.RunRoot = types.Options().RunRoot
 	}

 	if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -2497,23 +2501,29 @@ func (s *store) DeleteContainer(id string) error {
 	gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
 	wg.Add(1)
 	go func() {
-		var err error
-		for attempts := 0; attempts < 50; attempts++ {
-			err = os.RemoveAll(gcpath)
-			if err == nil || !system.IsEBUSY(err) {
-				break
-			}
-			time.Sleep(time.Millisecond * 100)
+		defer wg.Done()
+		// attempt a simple rm -rf first
+		err := os.RemoveAll(gcpath)
+		if err == nil {
+			errChan <- nil
+			return
 		}
-		errChan <- err
-		wg.Done()
+		// and if it fails get to the more complicated cleanup
+		errChan <- system.EnsureRemoveAll(gcpath)
 	}()

 	rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
 	wg.Add(1)
 	go func() {
-		errChan <- os.RemoveAll(rcpath)
-		wg.Done()
+		defer wg.Done()
+		// attempt a simple rm -rf first
+		err := os.RemoveAll(rcpath)
+		if err == nil {
+			errChan <- nil
+			return
+		}
+		// and if it fails get to the more complicated cleanup
+		errChan <- system.EnsureRemoveAll(rcpath)
 	}()

 	go func() {
vendor/github.com/containers/storage/types/options.go | 39 (generated, vendored)
@@ -17,7 +17,7 @@ import (
 )

 // TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
 	Storage struct {
 		Driver  string `toml:"driver"`
 		RunRoot string `toml:"runroot"`
@@ -306,7 +306,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
 // ReloadConfigurationFile parses the specified configuration file and overrides
 // the configuration in storeOptions.
 func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
-	config := new(tomlConfig)
+	config := new(TomlConfig)

 	meta, err := toml.DecodeFile(configFile, &config)
 	if err == nil {
@@ -424,3 +424,38 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 func Options() StoreOptions {
 	return defaultStoreOptions
 }
+
+// Save overwrites the TomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return err
+	}
+	if err = os.Remove(configFile); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	f, err := os.Create(configFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+	config := new(TomlConfig)

+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = toml.DecodeFile(configFile, &config)
+	if err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
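A sketch of the round trip the new API enables: read storage.conf, tweak a field, and write it back. Whether your process is allowed to rewrite storage.conf is a separate question; the driver value is just an example.

package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	rootless := true

	// Read the current storage.conf (path resolved by DefaultConfigFile).
	conf, err := types.StorageConfig(rootless)
	if err != nil {
		panic(err)
	}

	// Change a setting and persist it.
	conf.Storage.Driver = "overlay"
	if err := types.Save(*conf, rootless); err != nil {
		panic(err)
	}
	fmt.Println("storage.conf updated")
}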