Mirror of https://github.com/containers/podman.git (synced 2025-06-23 02:18:13 +08:00)
Merge pull request #8145 from containers/dependabot/go_modules/github.com/containers/common-0.26.2
Bump github.com/containers/common from 0.26.0 to 0.26.3
go.mod (6 changed lines)

@@ -12,11 +12,11 @@ require (
     github.com/containernetworking/cni v0.8.0
     github.com/containernetworking/plugins v0.8.7
     github.com/containers/buildah v1.16.5
-    github.com/containers/common v0.26.0
+    github.com/containers/common v0.26.3
     github.com/containers/conmon v2.0.20+incompatible
-    github.com/containers/image/v5 v5.6.0
+    github.com/containers/image/v5 v5.7.0
     github.com/containers/psgo v1.5.1
-    github.com/containers/storage v1.23.5
+    github.com/containers/storage v1.23.7
     github.com/coreos/go-systemd/v22 v22.1.0
     github.com/cri-o/ocicni v0.2.0
     github.com/cyphar/filepath-securejoin v0.2.2
go.sum (24 changed lines)

@@ -90,13 +90,13 @@ github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CY
 github.com/containers/buildah v1.16.5 h1:0tH2TJeZWbspWExHY0A7d7lpdYoxB5VFgRTbzc+qKGs=
 github.com/containers/buildah v1.16.5/go.mod h1:tqjupkpg4uqeQWhb7O6puyJwoIbpTkBrWAq1iUK9Wh0=
 github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
-github.com/containers/common v0.26.0 h1:BCo/S5Dl8aRRG7vze+hoWdCd5xuThIP/tCB5NjTIn6g=
-github.com/containers/common v0.26.0/go.mod h1:BCK8f8Ye1gvUVGcokJngJG4YC80c2Bjx/F9GyoIAVMc=
+github.com/containers/common v0.26.3 h1:5Kb5fMmJ7/xMiJ+iEbPA+5pQpl/FGxCgJex4nml4Slo=
+github.com/containers/common v0.26.3/go.mod h1:hJWZIlrl5MsE2ELNRa+MPp6I1kPbXHauuj0Ym4BsLG4=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
-github.com/containers/image/v5 v5.6.0 h1:r4AqIX4NO/X7OJkqX574zITV3fq0ZPn0pSlLsxWF6ww=
-github.com/containers/image/v5 v5.6.0/go.mod h1:iUSWo3SOLqJo0CkZkKrHxqR6YWqrT98mkXFpE0MceE8=
+github.com/containers/image/v5 v5.7.0 h1:fiTC8/Xbr+zEP6njGTZtPW/3UD7MC93nC9DbUoWdxkA=
+github.com/containers/image/v5 v5.7.0/go.mod h1:8aOy+YaItukxghRORkvhq5ibWttHErzDLy6egrKfKos=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
@@ -106,8 +106,10 @@ github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA
 github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
 github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
 github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
-github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY=
-github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
+github.com/containers/storage v1.23.6 h1:3rcZ1KTNv8q7SkZ75gcrFGYqTeiuI04Zg7m9X1sCg/s=
+github.com/containers/storage v1.23.6/go.mod h1:haFs0HRowKwyzvWEx9EgI3WsL8XCSnBDb5f8P5CAxJY=
+github.com/containers/storage v1.23.7 h1:43ImvG/npvQSZXRjaudVvKISIuZSfI6qvtSNQQSGO/A=
+github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -318,8 +320,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
-github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.1 h1:bPb7nMRdOZYDrpPMTA3EInUQrdgoBinqUuSwlGdKDdE=
+github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -360,8 +362,11 @@ github.com/moby/sys/mount v0.1.1 h1:mdhBytJ1SMmMat0gtzWWjFX/87K5j6E/7Q5z7rR0cZY=
 github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
 github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
 github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
-github.com/moby/sys/mountinfo v0.2.0 h1:HgYSHMWCj8D7w7TE/cQJfWrY6W3TUxs3pwGFyC5qCvE=
 github.com/moby/sys/mountinfo v0.2.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.3.1 h1:R+C9GycEzoR3GdwQ7mANRhJORnVDJiRkf0JMY82MeI0=
+github.com/moby/sys/mountinfo v0.3.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 h1:SPoLlS9qUUnXcIY4pvA4CTwYjk0Is5f4UPEkeESr53k=
 github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
 github.com/moby/vpnkit v0.4.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ=
@@ -503,7 +508,6 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
 github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -621,6 +621,13 @@ func SkipIfRootless(reason string) {
     }
 }
 
+func SkipIfNotRootless(reason string) {
+    checkReason(reason)
+    if os.Geteuid() == 0 {
+        ginkgo.Skip("[notRootless]: " + reason)
+    }
+}
+
 func SkipIfNotFedora() {
     info := GetHostDistributionInfo()
     if info.Distribution != "fedora" {
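The new helper mirrors SkipIfRootless with the check inverted. A hedged usage sketch follows (the suite scaffolding and spec names are illustrative and not taken from this diff; the dot-imports match the ones the e2e tests already use):

package example_test

import (
	"os"
	"testing"

	. "github.com/containers/podman/v2/test/utils"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "rootless-only example")
}

var _ = Describe("rootless-only checks", func() {
	It("runs only as an unprivileged user", func() {
		// SkipIfNotRootless comes from the dot-imported test/utils package above.
		SkipIfNotRootless("this spec is only meaningful as rootless")
		Expect(os.Geteuid()).ToNot(Equal(0))
	})
})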
@@ -5,9 +5,9 @@ import (
     "io/ioutil"
     "os"
     "os/exec"
+    "os/user"
     "path/filepath"
 
-    "github.com/containers/podman/v2/pkg/rootless"
     . "github.com/containers/podman/v2/test/utils"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -78,39 +78,35 @@ var _ = Describe("Podman Info", func() {
     })
 
     It("podman info rootless storage path", func() {
-        if !rootless.IsRootless() {
-            Skip("test of rootless_storage_path is only meaningful as rootless")
-        }
+        SkipIfNotRootless("test of rootless_storage_path is only meaningful as rootless")
         SkipIfRemote("Only tests storage on local client")
-        oldHOME, hasHOME := os.LookupEnv("HOME")
+        configPath := filepath.Join(podmanTest.TempDir, ".config", "containers", "storage.conf")
+        os.Setenv("CONTAINERS_STORAGE_CONF", configPath)
         defer func() {
-            if hasHOME {
-                os.Setenv("HOME", oldHOME)
-            } else {
-                os.Unsetenv("HOME")
-            }
+            os.Unsetenv("CONTAINERS_STORAGE_CONF")
         }()
-        os.Setenv("HOME", podmanTest.TempDir)
-        configPath := filepath.Join(os.Getenv("HOME"), ".config", "containers", "storage.conf")
         err := os.RemoveAll(filepath.Dir(configPath))
         Expect(err).To(BeNil())
 
         err = os.MkdirAll(filepath.Dir(configPath), os.ModePerm)
         Expect(err).To(BeNil())
 
-        rootlessStoragePath := `"/tmp/$HOME/$USER/$UID"`
+        rootlessStoragePath := `"/tmp/$HOME/$USER/$UID/storage"`
         driver := `"overlay"`
         storageOpt := `"/usr/bin/fuse-overlayfs"`
         storageConf := []byte(fmt.Sprintf("[storage]\ndriver=%s\nrootless_storage_path=%s\n[storage.options]\nmount_program=%s", driver, rootlessStoragePath, storageOpt))
         err = ioutil.WriteFile(configPath, storageConf, os.ModePerm)
         Expect(err).To(BeNil())
 
-        expect := filepath.Join("/tmp", os.Getenv("HOME"), os.Getenv("USER"), os.Getenv("UID"))
+        u, err := user.Current()
+        Expect(err).To(BeNil())
+
+        expect := filepath.Join("/tmp", os.Getenv("HOME"), u.Username, u.Uid, "storage")
         podmanPath := podmanTest.PodmanTest.PodmanBinary
         cmd := exec.Command(podmanPath, "info", "--format", "{{.Store.GraphRoot}}")
         out, err := cmd.CombinedOutput()
         fmt.Println(string(out))
         Expect(err).To(BeNil())
-        Expect(string(out)).To(ContainSubstring(expect))
+        Expect(string(out)).To(Equal(expect))
     })
 })
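The rewritten test points CONTAINERS_STORAGE_CONF at a storage.conf inside the test's temp dir instead of overriding HOME, and now asserts an exact graph root. A minimal standalone sketch of how that expected path is derived from rootless_storage_path = "/tmp/$HOME/$USER/$UID/storage" (illustration only, not part of the commit):

package main

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
)

func main() {
	// $HOME comes from the environment, $USER and $UID from os/user.Current(),
	// exactly as the test builds its "expect" variable.
	u, err := user.Current()
	if err != nil {
		panic(err)
	}
	// filepath.Join collapses the leading "/" of $HOME, so
	// /tmp + /home/alice + alice + 1000 + storage -> /tmp/home/alice/alice/1000/storage
	expect := filepath.Join("/tmp", os.Getenv("HOME"), u.Username, u.Uid, "storage")
	fmt.Println(expect)
}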
vendor/github.com/containers/common/pkg/config/config_local.go (generated, vendored; 2 changed lines)

@@ -79,7 +79,7 @@ func (c *ContainersConfig) validateUlimits() error {
 }
 
 func (c *ContainersConfig) validateTZ() error {
-    if c.TZ == "local" {
+    if c.TZ == "local" || c.TZ == "" {
         return nil
     }
 
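An empty TZ now validates the same way as "local". A hedged sketch of an equivalent check, not the library's actual implementation: the real validateTZ resolves the zone name against the host's zoneinfo data, and time.LoadLocation merely stands in for that lookup here.

package main

import (
	"fmt"
	"time"
)

func validateTZ(tz string) error {
	// The change above treats an unset TZ ("") like "local": both are accepted as-is.
	if tz == "local" || tz == "" {
		return nil
	}
	// Stand-in for the zoneinfo lookup done by the vendored code.
	if _, err := time.LoadLocation(tz); err != nil {
		return fmt.Errorf("unparsable timezone %q: %w", tz, err)
	}
	return nil
}

func main() {
	for _, tz := range []string{"", "local", "Europe/Berlin", "Not/AZone"} {
		fmt.Printf("%-16q -> %v\n", tz, validateTZ(tz))
	}
}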
vendor/github.com/containers/common/pkg/config/default.go (generated, vendored; 8 changed lines)

@@ -224,15 +224,13 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 
     c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log")
 
-    var storeOpts storage.StoreOptions
     if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
-        storage.ReloadConfigurationFile(path, &storeOpts)
-    } else {
-        storeOpts, err = storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
+        storage.SetDefaultConfigFilePath(path)
+    }
+    storeOpts, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
     if err != nil {
         return nil, err
     }
-    }
 
     if storeOpts.GraphRoot == "" {
         logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
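With this change, CONTAINERS_STORAGE_CONF is registered as the default config file path and DefaultStoreOptions is consulted unconditionally, instead of reloading the file into a locally declared StoreOptions. A condensed sketch of the resulting flow (error handling trimmed to a panic; the unshare import path is assumed to be the usual containers/storage one):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// If the override is set, containers/storage reads that file as its default config.
	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
		storage.SetDefaultConfigFilePath(path)
	}
	// DefaultStoreOptions is now always the single source of the effective options.
	storeOpts, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
	if err != nil {
		panic(err)
	}
	fmt.Println("graph root:", storeOpts.GraphRoot)
}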
vendor/github.com/containers/common/pkg/report/template.go (generated, vendored; 3 changed lines)

@@ -44,7 +44,6 @@ func NormalizeFormat(format string) string {
     if !strings.HasSuffix(f, "\n") {
         f += "\n"
     }
-
     return f
 }
 
@@ -97,6 +96,8 @@ func (t *Template) Parse(text string) (*Template, error) {
     if strings.HasPrefix(text, "table ") {
         t.isTable = true
         text = "{{range .}}" + NormalizeFormat(text) + "{{end}}"
+    } else {
+        text = NormalizeFormat(text)
     }
 
     tt, err := t.Template.Parse(text)
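The new else branch means formats without the "table " prefix are also run through NormalizeFormat before parsing. A self-contained illustration using plain text/template; the local normalize helper below stands in for the vendored NormalizeFormat and is not its exact implementation.

package main

import (
	"os"
	"strings"
	"text/template"
)

// normalize mimics the idea of NormalizeFormat: drop the "table " prefix,
// turn literal "\t"/"\n" escapes into real characters, and ensure a trailing newline.
func normalize(format string) string {
	f := strings.TrimPrefix(format, "table ")
	f = strings.ReplaceAll(f, `\t`, "\t")
	f = strings.ReplaceAll(f, `\n`, "\n")
	if !strings.HasSuffix(f, "\n") {
		f += "\n"
	}
	return f
}

func main() {
	format := "table {{.Name}}\t{{.Version}}"
	text := format
	if strings.HasPrefix(text, "table ") {
		// Table formats apply per row, hence the range wrapper (as in the hunk above).
		text = "{{range .}}" + normalize(text) + "{{end}}"
	} else {
		// The new else branch: plain formats get normalized too.
		text = normalize(text)
	}
	tmpl := template.Must(template.New("row").Parse(text))
	rows := []struct{ Name, Version string }{{"common", "0.26.3"}, {"image/v5", "5.7.0"}}
	_ = tmpl.Execute(os.Stdout, rows)
}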
vendor/github.com/containers/common/pkg/seccomp/default_linux.go (generated, vendored; 11 changed lines)

@@ -66,9 +66,13 @@ func DefaultProfile() *Seccomp {
         "chown",
         "chown32",
         "clock_adjtime",
+        "clock_adjtime64",
         "clock_getres",
+        "clock_getres_time64",
         "clock_gettime",
+        "clock_gettime64",
         "clock_nanosleep",
+        "clock_nanosleep_time64",
         "clone",
         "close",
         "connect",
@@ -226,12 +230,14 @@ func DefaultProfile() *Seccomp {
         "pivot_root",
         "poll",
         "ppoll",
+        "ppoll_time64",
         "prctl",
         "pread64",
         "preadv",
         "preadv2",
         "prlimit64",
         "pselect6",
+        "pselect6_time64",
         "pwrite64",
         "pwritev",
         "pwritev2",
@@ -343,10 +349,13 @@ func DefaultProfile() *Seccomp {
         "timer_delete",
         "timer_getoverrun",
         "timer_gettime",
+        "timer_gettime64",
         "timer_settime",
         "timerfd_create",
         "timerfd_gettime",
+        "timerfd_gettime64",
         "timerfd_settime",
+        "timerfd_settime64",
         "times",
         "tkill",
         "truncate",
@@ -361,6 +370,7 @@ func DefaultProfile() *Seccomp {
         "unshare",
         "utime",
         "utimensat",
+        "utimensat_time64",
         "utimes",
         "vfork",
         "vmsplice",
@@ -642,6 +652,7 @@ func DefaultProfile() *Seccomp {
         "settimeofday",
         "stime",
         "clock_settime",
+        "clock_settime64",
     },
     Action: ActAllow,
     Args: []*Arg{},
vendor/github.com/containers/common/pkg/seccomp/seccomp.json (generated, vendored; 13 changed lines)

@@ -68,9 +68,13 @@
         "chown",
         "chown32",
         "clock_adjtime",
+        "clock_adjtime64",
         "clock_getres",
+        "clock_getres_time64",
         "clock_gettime",
+        "clock_gettime64",
         "clock_nanosleep",
+        "clock_nanosleep_time64",
         "clone",
         "close",
         "connect",
@@ -228,12 +232,14 @@
         "pivot_root",
         "poll",
         "ppoll",
+        "ppoll_time64",
         "prctl",
         "pread64",
         "preadv",
         "preadv2",
         "prlimit64",
         "pselect6",
+        "pselect6_time64",
         "pwrite64",
         "pwritev",
         "pwritev2",
@@ -345,10 +351,13 @@
         "timer_delete",
         "timer_getoverrun",
         "timer_gettime",
+        "timer_gettime64",
         "timer_settime",
         "timerfd_create",
         "timerfd_gettime",
+        "timerfd_gettime64",
         "timerfd_settime",
+        "timerfd_settime64",
         "times",
         "tkill",
         "truncate",
@@ -363,6 +372,7 @@
         "unshare",
         "utime",
         "utimensat",
+        "utimensat_time64",
         "utimes",
         "vfork",
         "vmsplice",
@@ -749,7 +759,8 @@
     "names": [
         "settimeofday",
         "stime",
-        "clock_settime"
+        "clock_settime",
+        "clock_settime64"
     ],
     "action": "SCMP_ACT_ALLOW",
     "args": [],
vendor/github.com/containers/common/version/version.go (generated, vendored; 2 changed lines)

@@ -1,4 +1,4 @@
 package version
 
 // Version is the version of the build.
-const Version = "0.26.0"
+const Version = "0.26.3"
vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored; 14 changed lines)

@@ -121,8 +121,6 @@ type imageCopier struct {
     diffIDsAreNeeded bool
     canModifyManifest bool
     canSubstituteBlobs bool
-    ociDecryptConfig *encconfig.DecryptConfig
-    ociEncryptConfig *encconfig.EncryptConfig
     ociEncryptLayers *[]int
 }
 
@@ -260,6 +258,8 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
         // we might want to add a separate CommonCtx — or would that be too confusing?
         blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
+        ociDecryptConfig: options.OciDecryptConfig,
+        ociEncryptConfig: options.OciEncryptConfig,
     }
     // Default to using gzip compression unless specified otherwise.
     if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
@@ -605,8 +605,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
         src: src,
         // diffIDsAreNeeded is computed later
         canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
-        ociDecryptConfig: options.OciDecryptConfig,
-        ociEncryptConfig: options.OciEncryptConfig,
         ociEncryptLayers: options.OciEncryptLayers,
     }
     // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
@@ -621,7 +619,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
         return nil, "", "", err
     }
 
-    destRequiresOciEncryption := (isEncrypted(src) && ic.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
+    destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
 
     // We compute preferredManifestMIMEType only to show it in error messages.
     // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
@@ -633,7 +631,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
     // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
     ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
     // If encrypted and decryption keys provided, we should try to decrypt
-    ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.ociDecryptConfig != nil) || ic.ociEncryptConfig != nil
+    ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
 
     if err := ic.copyLayers(ctx); err != nil {
         return nil, "", "", err
@@ -1048,7 +1046,7 @@ type diffIDResult struct {
 func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
     cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
     // Diffs are needed if we are encrypting an image or trying to decrypt an image
-    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.ociDecryptConfig != nil)
+    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
 
     // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
     if !diffIDIsNeeded {
@@ -1136,8 +1134,6 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
             return pipeWriter
         }
     }
-    ic.c.ociDecryptConfig = ic.ociDecryptConfig
-    ic.c.ociEncryptConfig = ic.ociEncryptConfig
 
     blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar) // Sets err to nil on success
     return blobInfo, diffIDChan, err
vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored; 2 changed lines)

@@ -194,7 +194,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
vendor/github.com/containers/image/v5/docker/archive/transport.go (generated, vendored; 2 changed lines)

@@ -50,7 +50,7 @@ type archiveReference struct {
     // Must not be set if ref is set.
     sourceIndex int
     // If not nil, must have been created from path (but archiveReader.path may point at a temporary
-    // file, not necesarily path precisely).
+    // file, not necessarily path precisely).
     archiveReader *tarfile.Reader
     // If not nil, must have been created for path
     archiveWriter *tarfile.Writer
vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored; 3 changed lines)

@@ -209,6 +209,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 
 // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+// signatureBase is always set in the return value
 func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
     registry := reference.Domain(ref.ref)
     auth, err := config.GetCredentials(sys, registry)
@@ -216,7 +217,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
         return nil, errors.Wrapf(err, "error getting username and password")
     }
 
-    sigBase, err := configuredSignatureStorageBase(sys, ref, write)
+    sigBase, err := SignatureStorageBaseURL(sys, ref, write)
     if err != nil {
         return nil, err
     }
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored; 22 changed lines)

@@ -78,12 +78,12 @@ func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
         return err
     }
     switch {
-    case d.c.signatureBase != nil:
-        return nil
     case d.c.supportsSignatures:
         return nil
+    case d.c.signatureBase != nil:
+        return nil
     default:
-        return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+        return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
     }
 }
 
@@ -284,7 +284,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -335,7 +335,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
 // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
 // Even worse, docker/distribution does not actually reasonably implement canceling uploads
 // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
-// so, be a nice client and don't create unnecesary upload sessions on the server.
+// so, be a nice client and don't create unnecessary upload sessions on the server.
 exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
 if err != nil {
     logrus.Debugf("... Failed: %v", err)
@@ -479,12 +479,12 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
         return err
     }
     switch {
-    case d.c.signatureBase != nil:
-        return d.putSignaturesToLookaside(signatures, *instanceDigest)
     case d.c.supportsSignatures:
         return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
+    case d.c.signatureBase != nil:
+        return d.putSignaturesToLookaside(signatures, *instanceDigest)
     default:
-        return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+        return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
     }
 }
 
@@ -502,9 +502,6 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
     // NOTE: Keep this in sync with docs/signature-protocols.md!
     for i, signature := range signatures {
         url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
-        if url == nil {
-            return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-        }
         err := d.putOneSignature(url, signature)
         if err != nil {
             return err
@@ -517,9 +514,6 @@
     // is sufficient.
     for i := len(signatures); ; i++ {
         url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
-        if url == nil {
-            return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-        }
         missing, err := d.c.deleteOneSignature(url)
         if err != nil {
             return err
vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored; 19 changed lines)

@@ -53,7 +53,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 // contain the image, it will be used for all future pull actions. Always try the
 // non-mirror original location last; this both transparently handles the case
 // of no mirrors configured, and ensures we return the error encountered when
-// acessing the upstream location if all endpoints fail.
+// accessing the upstream location if all endpoints fail.
 pullSources, err := registry.PullSourcesFromReference(ref.ref)
 if err != nil {
     return nil, err
@@ -238,6 +238,9 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
         return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
     }
     for _, url := range urls {
+        // NOTE: we must not authenticate on additional URLs as those
+        // can be abused to leak credentials or tokens. Please
+        // refer to CVE-2020-15157 for more information.
         resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
         if err == nil {
             if resp.StatusCode != http.StatusOK {
@@ -297,12 +300,12 @@ func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *d
         return nil, err
     }
     switch {
-    case s.c.signatureBase != nil:
-        return s.getSignaturesFromLookaside(ctx, instanceDigest)
     case s.c.supportsSignatures:
         return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+    case s.c.signatureBase != nil:
+        return s.getSignaturesFromLookaside(ctx, instanceDigest)
     default:
-        return [][]byte{}, nil
+        return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
     }
 }
 
@@ -336,9 +339,6 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
     signatures := [][]byte{}
     for i := 0; ; i++ {
         url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
-        if url == nil {
-            return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-        }
         signature, missing, err := s.getOneSignature(ctx, url)
         if err != nil {
             return nil, err
@@ -474,7 +474,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
     return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
 }
 
-if c.signatureBase != nil {
 manifestDigest, err := manifest.Digest(manifestBody)
 if err != nil {
     return err
@@ -482,9 +481,6 @@
 
 for i := 0; ; i++ {
     url := signatureStorageURL(c.signatureBase, manifestDigest, i)
-    if url == nil {
-        return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
-    }
     missing, err := c.deleteOneSignature(url)
     if err != nil {
         return err
@@ -493,7 +489,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
         break
     }
 }
-}
 
 return nil
 }
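The added comment encodes the fix for CVE-2020-15157 ("ContainerDrip"): external (foreign-layer) blob URLs must never receive registry credentials. A hedged illustration of that rule, with plain net/http standing in for c/image's makeRequestToResolvedURL:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchExternalBlob deliberately builds a bare request: no Authorization header,
// no cookies, nothing derived from a registry login.
func fetchExternalBlob(url string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching external blob: unexpected status %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	blob, err := fetchExternalBlob("https://example.com/layer.tar.gz") // placeholder URL
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("fetched", len(blob), "bytes")
}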
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored; 4 changed lines)

@@ -94,7 +94,7 @@ func (d *Destination) HasThreadSafePutBlob() bool {
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
     // Ouch, we need to stream the blob into a temporary file just to determine the size.
-    // When the layer is decompressed, we also have to generate the digest on uncompressed datas.
+    // When the layer is decompressed, we also have to generate the digest on uncompressed data.
     if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
         logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
         streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
@@ -159,7 +159,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go (generated, vendored; 2 changed lines)

@@ -287,7 +287,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 // In particular, because the v2s2 manifest being generated uses
 // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
 // layers not their _actual_ digest. The result is that copy/... will
-// be verifing a "digest" which is not the actual layer's digest (but
+// be verifying a "digest" which is not the actual layer's digest (but
 // is instead the DiffID).
 
 uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go (generated, vendored; 4 changed lines)

@@ -48,7 +48,7 @@ func NewWriter(dest io.Writer) *Writer {
 }
 
 // lock does some sanity checks and locks the Writer.
-// If this function suceeds, the caller must call w.unlock.
+// If this function succeeds, the caller must call w.unlock.
 // Do not use Writer.mutex directly.
 func (w *Writer) lock() error {
     w.mutex.Lock()
@@ -67,7 +67,7 @@ func (w *Writer) unlock() {
 
 // tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata.
 // info.Digest must not be empty.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // The caller must have locked the Writer.
 func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
59
vendor/github.com/containers/image/v5/docker/lookaside.go
generated
vendored
59
vendor/github.com/containers/image/v5/docker/lookaside.go
generated
vendored
@ -10,6 +10,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/containers/image/v5/docker/reference"
|
"github.com/containers/image/v5/docker/reference"
|
||||||
|
"github.com/containers/image/v5/internal/rootless"
|
||||||
"github.com/containers/image/v5/types"
|
"github.com/containers/image/v5/types"
|
||||||
"github.com/containers/storage/pkg/homedir"
|
"github.com/containers/storage/pkg/homedir"
|
||||||
"github.com/ghodss/yaml"
|
"github.com/ghodss/yaml"
|
||||||
@ -30,6 +31,12 @@ const builtinRegistriesDirPath = "/etc/containers/registries.d"
|
|||||||
// userRegistriesDirPath is the path to the per user registries.d.
|
 // userRegistriesDirPath is the path to the per user registries.d.
 var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
 
+// defaultUserDockerDir is the default sigstore directory for unprivileged user
+var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
+
+// defaultDockerDir is the default sigstore directory for root
+var defaultDockerDir = "/var/lib/containers/sigstore"
+
 // registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
 // NOTE: Keep this in sync with docs/registries.d.md!
 type registryConfiguration struct {
@@ -45,11 +52,18 @@ type registryNamespace struct {
 }
 
 // signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
-// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below.
-type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported.
+// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
+type signatureStorageBase *url.URL
 
-// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
-func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) {
+// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
+// Warning: This function only exposes configuration in registries.d;
+// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
+func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
+dr, ok := ref.(dockerReference)
+if !ok {
+return nil, errors.Errorf("ref must be a dockerReference")
+}
 // FIXME? Loading and parsing the config could be cached across calls.
 dirPath := registriesDirPath(sys)
 logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
@@ -58,20 +72,23 @@ func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReferenc
 return nil, err
 }
 
-topLevel := config.signatureTopLevel(ref, write)
-if topLevel == "" {
-return nil, nil
-}
-
-url, err := url.Parse(topLevel)
-if err != nil {
-return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
-}
+topLevel := config.signatureTopLevel(dr, write)
+var url *url.URL
+if topLevel != "" {
+url, err = url.Parse(topLevel)
+if err != nil {
+return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
+}
+} else {
+// returns default directory if no sigstore specified in configuration file
+url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
+logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
+}
 // NOTE: Keep this in sync with docs/signature-protocols.md!
 // FIXME? Restrict to explicitly supported schemes?
-repo := reference.Path(ref.ref) // Note that this is without a tag or digest.
+repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
 if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
-return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
+return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
 }
 url.Path = url.Path + "/" + repo
 return url, nil
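Reviewer note: to see the newly exported entry point from the caller's side, here is a minimal, hypothetical sketch (not part of this change). It assumes the c/image v5.7.0 API shown above (docker.SignatureStorageBaseURL together with docker.ParseReference), and the busybox reference is only an example.

    package main

    import (
    	"fmt"

    	"github.com/containers/image/v5/docker"
    	"github.com/containers/image/v5/types"
    )

    func main() {
    	// Parse a registry reference; the image name is purely illustrative.
    	ref, err := docker.ParseReference("//docker.io/library/busybox:latest")
    	if err != nil {
    		panic(err)
    	}
    	// write=false: resolve the read-side lookaside location.
    	base, err := docker.SignatureStorageBaseURL(&types.SystemContext{}, ref, false)
    	if err != nil {
    		panic(err)
    	}
    	// With no registries.d configuration this should be the built-in default:
    	// file:///var/lib/containers/sigstore for root, or
    	// ~/.local/share/containers/sigstore for an unprivileged user.
    	fmt.Println(base.String())
    }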
@@ -93,6 +110,14 @@ func registriesDirPath(sys *types.SystemContext) string {
 return systemRegistriesDirPath
 }
 
+// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid
+func builtinDefaultSignatureStorageDir(euid int) *url.URL {
+if euid != 0 {
+return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+}
+return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
 // loadAndMergeConfig loads configuration files in dirPath
 func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
@@ -149,7 +174,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 }
 
 // config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
-// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used.
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
 func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
 if config.Docker != nil {
 // Look for a full match.
@@ -178,7 +203,6 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
 return url
 }
 }
-logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
 return ""
 }
 
@@ -196,13 +220,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 return ""
 }
 
-// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
-// Returns nil iff base == nil.
+// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
+// base is not nil from the caller
 // NOTE: Keep this in sync with docs/signature-protocols.md!
 func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
-if base == nil {
-return nil
-}
 url := *base
 url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
 return &url
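Reviewer note: as a reading aid for the URL layout built by signatureStorageURL above (documented in docs/signature-protocols.md), the following standalone sketch rebuilds the same path shape outside the package. The base path and the digest input are made-up example values; this is an illustration, not vendored code.

    package main

    import (
    	"fmt"
    	"net/url"

    	"github.com/opencontainers/go-digest"
    )

    // lookasideSignatureURL mirrors the path construction shown in the diff:
    // <base>@<digest-algorithm>=<digest-hex>/signature-<index+1>
    func lookasideSignatureURL(base url.URL, manifestDigest digest.Digest, index int) *url.URL {
    	u := base // copy, so the caller's base is not modified
    	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
    	return &u
    }

    func main() {
    	base := url.URL{Scheme: "file", Path: "/var/lib/containers/sigstore/library/busybox"}
    	d := digest.FromString("example manifest") // example digest, not a real image manifest
    	fmt.Println(lookasideSignatureURL(base, d, 0).String())
    	// Prints something like:
    	// file:///var/lib/containers/sigstore/library/busybox@sha256=<hex>/signature-1
    }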
2  vendor/github.com/containers/image/v5/docker/tarfile/dest.go  (generated, vendored)
@@ -86,7 +86,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
2  vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go  (generated, vendored)
@@ -131,7 +131,7 @@ var baseVariants = map[string]string{
 "arm64": "v8",
 }
 
-// WantedPlatforms returns all compatible platforms with the platform specifics possibly overriden by user,
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
 func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
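Reviewer note: the platform package is internal to c/image, so it cannot be imported directly; the sketch below only illustrates the idea behind the baseVariants table visible in this hunk (an arm64 request also matching its "v8" base variant and variant-less images). It is an assumption-laden simplification, not the vendored matching algorithm.

    package main

    import "fmt"

    // compatibleVariants returns the variant strings a matcher could accept for
    // an architecture, in preference order, when the user pinned no variant.
    func compatibleVariants(arch, requestedVariant string) []string {
    	baseVariants := map[string]string{
    		"arm64": "v8", // from the diff context above
    	}
    	if requestedVariant != "" {
    		return []string{requestedVariant, ""}
    	}
    	if base, ok := baseVariants[arch]; ok {
    		return []string{base, ""}
    	}
    	return []string{""}
    }

    func main() {
    	fmt.Println(compatibleVariants("arm64", "")) // [v8 ]
    }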
25  vendor/github.com/containers/image/v5/internal/rootless/rootless.go  (generated, vendored, new file)
@@ -0,0 +1,25 @@
+package rootless
+
+import (
+"os"
+"strconv"
+)
+
+// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any)
+//
+// Podman and similar software, in “rootless” configuration, when run as a non-root
+// user, very early switches to a user namespace, where Geteuid() == 0 (but does not
+// switch to a limited mount namespace); so, code relying on Geteuid() would use
+// system-wide paths in e.g. /var, when the user is actually not privileged to write to
+// them, and expects state to be stored in the home directory.
+//
+// If Podman is setting up such a user namespace, it records the original UID in an
+// environment variable, allowing us to make choices based on the actual user’s identity.
+func GetRootlessEUID() int {
+euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+if euidEnv != "" {
+euid, _ := strconv.Atoi(euidEnv)
+return euid
+}
+return os.Geteuid()
+}
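Reviewer note: because internal/rootless cannot be imported from outside c/image, here is a standalone restatement of the helper's decision for anyone who wants to experiment with it; the simulated environment value is an example.

    package main

    import (
    	"fmt"
    	"os"
    	"strconv"
    )

    // rootlessEUID restates rootless.GetRootlessEUID from the new file above:
    // prefer the UID Podman records in _CONTAINERS_ROOTLESS_UID before joining
    // the user namespace (where Geteuid() reports 0), else use the real EUID.
    func rootlessEUID() int {
    	if v := os.Getenv("_CONTAINERS_ROOTLESS_UID"); v != "" {
    		uid, _ := strconv.Atoi(v)
    		return uid
    	}
    	return os.Geteuid()
    }

    func main() {
    	// Simulate how a rootless Podman child process would see things.
    	os.Setenv("_CONTAINERS_ROOTLESS_UID", "1000")
    	fmt.Println(rootlessEUID()) // 1000, even where Geteuid() would be 0 inside the userns
    }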
2  vendor/github.com/containers/image/v5/oci/archive/oci_dest.go  (generated, vendored)
@@ -103,7 +103,7 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
2  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go  (generated, vendored)
@@ -186,7 +186,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
2  vendor/github.com/containers/image/v5/openshift/openshift-copies.go  (generated, vendored)
@@ -251,7 +251,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
 // getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
 // clientauth.Info object contain both user identification and server identification. We want different precedence orders for
 // both, so we have to split the objects and merge them separately
-// we want this order of precedence for user identifcation
+// we want this order of precedence for user identification
 // 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file
2  vendor/github.com/containers/image/v5/openshift/openshift.go  (generated, vendored)
@@ -410,7 +410,7 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
2  vendor/github.com/containers/image/v5/ostree/ostree_dest.go  (generated, vendored)
@@ -339,7 +339,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
13  vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go  (generated, vendored)
@@ -4,8 +4,8 @@ import (
 "fmt"
 "os"
 "path/filepath"
-"strconv"
 
+"github.com/containers/image/v5/internal/rootless"
 "github.com/containers/image/v5/pkg/blobinfocache/boltdb"
 "github.com/containers/image/v5/pkg/blobinfocache/memory"
 "github.com/containers/image/v5/types"
@@ -48,18 +48,9 @@ func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
 return filepath.Join(dataDir, "containers", "cache"), nil
 }
 
-func getRootlessUID() int {
-uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
-if uidEnv != "" {
-u, _ := strconv.Atoi(uidEnv)
-return u
-}
-return os.Geteuid()
-}
-
 // DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
 func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
-dir, err := blobInfoCacheDir(sys, getRootlessUID())
+dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
 if err != nil {
 logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
 return memory.New()
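Reviewer note: a minimal caller-side sketch, assuming the public API (blobinfocache.DefaultCache and types.SystemContext) stays as shown. With an empty SystemContext the cache location now follows the rootless-aware EUID helper, and the function falls back to an in-memory cache when no directory can be determined.

    package main

    import (
    	"fmt"

    	"github.com/containers/image/v5/pkg/blobinfocache"
    	"github.com/containers/image/v5/types"
    )

    func main() {
    	// sys may carry an explicit BlobInfoCacheDir; leaving it empty exercises
    	// the default, rootless-aware location logic changed in this bump.
    	sys := &types.SystemContext{}
    	cache := blobinfocache.DefaultCache(sys)
    	// The concrete type is an implementation detail (boltdb- or memory-backed).
    	fmt.Printf("using blob info cache of type %T\n", cache)
    }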
18  vendor/github.com/containers/image/v5/pkg/docker/config/config.go  (generated, vendored)
@@ -35,6 +35,7 @@ type authPath struct {
 
 var (
 defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+xdgConfigHomePath = filepath.FromSlash("containers/auth.json")
 xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json")
 dockerHomePath = filepath.FromSlash(".docker/config.json")
 dockerLegacyHomePath = ".dockercfg"
@@ -117,7 +118,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 }
 }
 
-// TODO(keyring): if we ever reenable the keyring support, we had to
+// TODO(keyring): if we ever re-enable the keyring support, we had to
 // query all credentials from the keyring here.
 
 return authConfigs, nil
@@ -136,8 +137,21 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
 // Logging the error as a warning instead and moving on to pulling the image
 logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
 }
+xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+if xdgCfgHome == "" {
+xdgCfgHome = filepath.Join(homedir.Get(), ".config")
+}
+paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
+if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+paths = append(paths,
+authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
+)
+} else {
 paths = append(paths,
 authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
+)
+}
+paths = append(paths,
 authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true},
 )
 return paths
@@ -245,7 +259,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
 })
 }
 
-// getPathToAuth gets the path of the auth.json file used for reading and writting credentials
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
 // returns the path, and a bool specifies whether the file is in legacy format
 func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
 if sys != nil {
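Reviewer note: the hunk above changes the credential-file search order, so here is a standalone sketch that restates it for review purposes. It deliberately leaves out the XDG_RUNTIME_DIR/containers/auth.json entry handled earlier in getAuthFilePaths, and it is an illustration rather than the vendored function.

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // candidateAuthFiles restates the lookup order built in this diff:
    // $XDG_CONFIG_HOME/containers/auth.json (XDG_CONFIG_HOME defaulting to ~/.config),
    // then $DOCKER_CONFIG/config.json or ~/.docker/config.json, then legacy ~/.dockercfg.
    func candidateAuthFiles(home string) []string {
    	var paths []string

    	xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
    	if xdgCfgHome == "" {
    		xdgCfgHome = filepath.Join(home, ".config")
    	}
    	paths = append(paths, filepath.Join(xdgCfgHome, "containers", "auth.json"))

    	if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
    		paths = append(paths, filepath.Join(dockerConfig, "config.json"))
    	} else {
    		paths = append(paths, filepath.Join(home, ".docker", "config.json"))
    	}

    	// Legacy Docker format is always consulted last.
    	return append(paths, filepath.Join(home, ".dockercfg"))
    }

    func main() {
    	home, _ := os.UserHomeDir()
    	for _, p := range candidateAuthFiles(home) {
    		fmt.Println(p)
    	}
    }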
2  vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go  (generated, vendored)
@@ -63,7 +63,7 @@ func removeAllAuthFromKernelKeyring() error {
 // split string "type;uid;gid;perm;description"
 keyAttrs := strings.SplitN(keyAttr, ";", 5)
 if len(keyAttrs) < 5 {
-return errors.Errorf("Key attributes of %d are not avaliable", k.ID())
+return errors.Errorf("Key attributes of %d are not available", k.ID())
 }
 keyDescribe := keyAttrs[4]
 if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
2  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go  (generated, vendored)
@@ -363,7 +363,7 @@ type configWrapper struct {
 configPath string
 // path to system-wide registries.conf.d directory, or "" if not used
 configDirPath string
-// path to user specificed registries.conf.d directory, or "" if not used
+// path to user specified registries.conf.d directory, or "" if not used
 userConfigDirPath string
 }
 
10  vendor/github.com/containers/image/v5/signature/mechanism.go  (generated, vendored)
@@ -28,8 +28,8 @@ type SigningMechanism interface {
 Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
 // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
 }
@@ -58,8 +58,8 @@ func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, e
 
 // gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
@@ -75,7 +75,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
 // Coverage: An error during reading the body can happen only if
 // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
 // to decrypt the contents anyway), or
-// 2) the message is signed AND we give ReadMessage a correspnding public key, which we don’t.
+// 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
 return nil, "", err
 }
 
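Reviewer note: for context on the Key-ID warning being cleaned up here, a hedged usage sketch of the public API, assuming signature.NewGPGSigningMechanism and the SigningMechanism interface keep the signatures shown. The signature file path is an example; the short key identifier must never be treated as the verified key identity that Verify() returns.

    package main

    import (
    	"fmt"
    	"io/ioutil"

    	"github.com/containers/image/v5/signature"
    )

    func main() {
    	mech, err := signature.NewGPGSigningMechanism()
    	if err != nil {
    		panic(err)
    	}
    	defer mech.Close()

    	// Any detached OpenPGP signature blob would do; this path is made up.
    	sig, err := ioutil.ReadFile("busybox.signature")
    	if err != nil {
    		panic(err)
    	}

    	// UNTRUSTED: nothing is verified here, and shortKeyID is only the
    	// OpenPGP "Key ID", not the key identity established by Verify().
    	contents, shortKeyID, err := mech.UntrustedSignatureContents(sig)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("claimed key %s, payload %d bytes\n", shortKeyID, len(contents))
    }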
4  vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go  (generated, vendored)
@@ -167,8 +167,8 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b
 
 // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 return gpgUntrustedSignatureContents(untrustedSignature)
4  vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go  (generated, vendored)
@@ -151,8 +151,8 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 
 // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 return gpgUntrustedSignatureContents(untrustedSignature)
12  vendor/github.com/containers/image/v5/signature/policy_config.go  (generated, vendored)
@@ -1,4 +1,4 @@
-// policy_config.go hanles creation of policy objects, either by parsing JSON
+// policy_config.go handles creation of policy objects, either by parsing JSON
 // or by programs building them programmatically.
 
 // The New* constructors are intended to be a stable API. FIXME: after an independent review.
@@ -516,7 +516,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
 return res, nil
 }
 
-// newPRMMatchExact is NewPRMMatchExact, except it resturns the private type.
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
 func newPRMMatchExact() *prmMatchExact {
 return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
 }
@@ -546,7 +546,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
 return nil
 }
 
-// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it resturns the private type.
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
 func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
 return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
 }
@@ -576,7 +576,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
 return nil
 }
 
-// newPRMMatchRepository is NewPRMMatchRepository, except it resturns the private type.
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
 func newPRMMatchRepository() *prmMatchRepository {
 return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
 }
@@ -606,7 +606,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
 return nil
 }
 
-// newPRMExactReference is NewPRMExactReference, except it resturns the private type.
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
 func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
 ref, err := reference.ParseNormalizedNamed(dockerReference)
 if err != nil {
@@ -652,7 +652,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
 return nil
 }
 
-// newPRMExactRepository is NewPRMExactRepository, except it resturns the private type.
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
 func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
 if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
 return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
6  vendor/github.com/containers/image/v5/signature/policy_eval.go  (generated, vendored)
@@ -1,5 +1,5 @@
 // This defines the top-level policy evaluation API.
-// To the extent possible, the interface of the fuctions provided
+// To the extent possible, the interface of the functions provided
 // here is intended to be completely unambiguous, and stable for users
 // to rely on.
 
@@ -47,7 +47,7 @@ type PolicyRequirement interface {
 // - sarUnknown if if this PolicyRequirement does not deal with signatures.
 // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
 // Returning sarUnknown and a non-nil error value is invalid.
-// WARNING: This makes the signature contents acceptable for futher processing,
+// WARNING: This makes the signature contents acceptable for further processing,
 // but it does not necessarily mean that the contents of the signature are
 // consistent with local policy.
 // For example:
@@ -166,7 +166,7 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
 // verified).
 // NOTE: This may legitimately return an empty list and no error, if the image
 // has no signatures or only invalid signatures.
-// WARNING: This makes the signature contents acceptable for futher processing,
+// WARNING: This makes the signature contents acceptable for further processing,
 // but it does not necessarily mean that the contents of the signature are
 // consistent with local policy.
 // For example:
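Reviewer note: the comments touched here describe the top-level policy evaluation API; a minimal caller-side sketch follows, under the assumption that the exported signature and docker/image package APIs (NewPolicyFromFile, NewPolicyContext, IsRunningImageAllowed, ParseReference, UnparsedInstance) are unchanged by this bump. The policy path and image reference are placeholders.

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containers/image/v5/docker"
    	"github.com/containers/image/v5/image"
    	"github.com/containers/image/v5/signature"
    )

    func main() {
    	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
    	if err != nil {
    		panic(err)
    	}
    	pc, err := signature.NewPolicyContext(policy)
    	if err != nil {
    		panic(err)
    	}
    	defer pc.Destroy()

    	ref, err := docker.ParseReference("//docker.io/library/busybox:latest")
    	if err != nil {
    		panic(err)
    	}
    	src, err := ref.NewImageSource(context.Background(), nil)
    	if err != nil {
    		panic(err)
    	}
    	defer src.Close()

    	// IsRunningImageAllowed evaluates the configured PolicyRequirements
    	// against an UnparsedImage wrapped around the image source.
    	allowed, err := pc.IsRunningImageAllowed(context.Background(), image.UnparsedInstance(src, nil))
    	fmt.Println(allowed, err)
    }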
2  vendor/github.com/containers/image/v5/signature/policy_reference_match.go  (generated, vendored)
@@ -51,7 +51,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse
 return signature.String() == intended.String()
 case reference.Canonical:
 // We don’t actually compare the manifest digest against the signature here; that happens prSignedBy.in UnparsedImage.Manifest.
-// Becase UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
 // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
 return signature.Name() == intended.Name()
 default: // !reference.IsNameOnly(intended)
4  vendor/github.com/containers/image/v5/signature/signature.go  (generated, vendored)
@@ -210,7 +210,7 @@ type signatureAcceptanceRules struct {
 validateSignedDockerManifestDigest func(digest.Digest) error
 }
 
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principial components
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
 // match expected values, both as specified by rules, and returns it
 func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
 signed, keyIdentity, err := mech.Verify(unverifiedSignature)
@@ -248,7 +248,7 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
 // There is NO REASON to expect the values to be correct, or not intentionally misleading
 // (including things like “✅ Verified by $authority”)
 func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-// NOTE: This should eventualy do format autodetection.
+// NOTE: This should eventually do format autodetection.
 mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
 if err != nil {
 return nil, err
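Reviewer note: a sketch of the exported helper named in the hunk above, assuming its signature and the UntrustedSignatureInformation fields are unchanged; the file path is an example, and everything returned is untrusted, exactly as the surrounding comments stress.

    package main

    import (
    	"fmt"
    	"io/ioutil"

    	"github.com/containers/image/v5/signature"
    )

    func main() {
    	raw, err := ioutil.ReadFile("busybox.signature") // example path
    	if err != nil {
    		panic(err)
    	}
    	// Everything in info is UNTRUSTED: it is decoded without any verification.
    	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(raw)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(info.UntrustedDockerReference, info.UntrustedDockerManifestDigest, info.UntrustedShortKeyIdentifier)
    }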
4  vendor/github.com/containers/image/v5/storage/storage_image.go  (generated, vendored)
@@ -463,7 +463,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -657,7 +657,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
 // or to even check if we had it.
 // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
-// that relies on using a blob digest that has never been seeen by the store had better call
+// that relies on using a blob digest that has never been seen by the store had better call
 // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
 // so far we are going to accommodate that (if we should be doing that at all).
 logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
6  vendor/github.com/containers/image/v5/types/types.go  (generated, vendored)
@@ -170,7 +170,7 @@ type BICReplacementCandidate struct {
 Location BICLocationReference
 }
 
-// BlobInfoCache records data useful for reusing blobs, or substituing equivalent ones, to avoid unnecessary blob copies.
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
 //
 // It records two kinds of data:
 // - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
@@ -193,7 +193,7 @@ type BICReplacementCandidate struct {
 // can be directly reused within a registry, or mounted across registries within a registry server.)
 //
 // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
-// users of the cahce should just fall back to copying the blobs the usual way.
+// users of the cache should just fall back to copying the blobs the usual way.
 type BlobInfoCache interface {
 // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 // May return anyDigest if it is known to be uncompressed.
@@ -306,7 +306,7 @@ type ImageDestination interface {
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
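Reviewer note: the TryReusingBlob contract documented above (try reuse first, upload only on false) is easy to get wrong, so here is a hedged helper sketch showing how a copier might honor it, assuming the types.ImageDestination interface shown; dest, stream, info and cache are supplied by the caller, and this helper is not part of c/image.

    package copysketch

    import (
    	"context"
    	"io"

    	"github.com/containers/image/v5/types"
    )

    // putBlobIfMissing tries to reuse the blob (allowing substitution) and only
    // falls back to uploading the stream when the destination reports
    // (false, _, nil). Illustrative only.
    func putBlobIfMissing(ctx context.Context, dest types.ImageDestination, stream io.Reader,
    	info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {

    	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
    	if err != nil {
    		return types.BlobInfo{}, err
    	}
    	if reused {
    		// reusedInfo may legitimately differ from info when canSubstitute is true.
    		return reusedInfo, nil
    	}
    	// isConfig=false: treat the blob as a layer in this sketch.
    	return dest.PutBlob(ctx, stream, info, cache, false)
    }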
2  vendor/github.com/containers/image/v5/version/version.go  (generated, vendored)
@@ -6,7 +6,7 @@ const (
 // VersionMajor is for an API incompatible changes
 VersionMajor = 5
 // VersionMinor is for functionality in a backwards-compatible manner
-VersionMinor = 6
+VersionMinor = 7
 // VersionPatch is for backwards-compatible bug fixes
 VersionPatch = 0
 
11  vendor/github.com/containers/storage/.cirrus.yml  (generated, vendored)
@@ -17,14 +17,15 @@ env:
 ####
 #### Cache-image names to test with (double-quotes around names are critical)
 ###
-FEDORA_NAME: "fedora-32"
-PRIOR_FEDORA_NAME: "fedora-31"
-UBUNTU_NAME: "ubuntu-20"
-PRIOR_UBUNTU_NAME: "ubuntu-19"
+FEDORA_NAME: "fedora"
+PRIOR_FEDORA_NAME: "prior-fedora"
+UBUNTU_NAME: "ubuntu"
+PRIOR_UBUNTU_NAME: "prior-ubuntu"
 
 # GCE project where images live
 IMAGE_PROJECT: "libpod-218412"
-_BUILT_IMAGE_SUFFIX: "libpod-6508632441356288"
+# VM Image built in containers/automation_images
+_BUILT_IMAGE_SUFFIX: "c4948709391728640"
 FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
 PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
 UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
1  vendor/github.com/containers/storage/.gitignore  (generated, vendored)
@@ -9,6 +9,7 @@
 *.test
 .*.swp
 .DS_Store
+.idea*
 # a .bashrc may be added to customize the build environment
 .bashrc
 .gopath/
2  vendor/github.com/containers/storage/VERSION  (generated, vendored)
@@ -1 +1 @@
-1.23.5
+1.23.7
2  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go  (generated, vendored)
@@ -422,7 +422,7 @@ func subvolLimitQgroup(path string, size uint64) error {
 
 // subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
 // with search key of BTRFS_QGROUP_STATUS_KEY.
-// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY.
+// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
 // For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
 func subvolQgroupStatus(path string) error {
 dir, err := openDir(path)
2  vendor/github.com/containers/storage/drivers/chown_unix.go  (generated, vendored)
@@ -54,7 +54,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
 }
 
 // Make the change.
-if err := syscall.Lchown(path, uid, gid); err != nil {
+if err := os.Lchown(path, uid, gid); err != nil {
 return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
 }
 // Restore the SUID and SGID bits if they were originally set.
96  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go  (generated, vendored)
@@ -1213,7 +1213,11 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
 return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
 }
 
-defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
+defer func() {
+if err := mount.Unmount(fsMountPoint); err != nil {
+logrus.Warnf("devmapper.growFS cleanup error: %v", err)
+}
+}()
 
 switch devices.BaseDeviceFilesystem {
 case ext4:
@@ -2257,6 +2261,38 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
 return err
 }
 
+func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
+files, err := ioutil.ReadDir(dir)
+if err != nil {
+logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
+return
+}
+
+for _, d := range files {
+if !d.IsDir() {
+continue
+}
+
+name := d.Name()
+fullname := path.Join(dir, name)
+
+// We use MNT_DETACH here in case it is still busy in some running
+// container. This means it'll go away from the global scope directly,
+// and the device will be released when that container dies.
+if err := mount.Unmount(fullname); err != nil {
+logrus.Warnf("devmapper.Shutdown error: %s", err)
+}
+
+if devInfo, err := devices.lookupDevice(name); err != nil {
+logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err)
+} else {
+if err := devices.deactivateDevice(devInfo); err != nil {
+logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
+}
+}
+}
+}
+
 // Shutdown shuts down the device by unmounting the root.
 func (devices *DeviceSet) Shutdown(home string) error {
 logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
@ -2278,45 +2314,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
|
|||||||
// will be killed and we will not get a chance to save deviceset
|
// will be killed and we will not get a chance to save deviceset
|
||||||
// metadata. Hence save this early before trying to deactivate devices.
|
// metadata. Hence save this early before trying to deactivate devices.
|
||||||
devices.saveDeviceSetMetaData()
|
devices.saveDeviceSetMetaData()
|
||||||
|
devices.unmountAndDeactivateAll(path.Join(home, "mnt"))
|
||||||
// ignore the error since it's just a best effort to not try to unmount something that's mounted
|
|
||||||
mounts, _ := mount.GetMounts()
|
|
||||||
mounted := make(map[string]bool, len(mounts))
|
|
||||||
for _, mnt := range mounts {
|
|
||||||
mounted[mnt.Mountpoint] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !info.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if mounted[p] {
|
|
||||||
// We use MNT_DETACH here in case it is still busy in some running
|
|
||||||
// container. This means it'll go away from the global scope directly,
|
|
||||||
// and the device will be released when that container dies.
|
|
||||||
if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
|
|
||||||
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
|
|
||||||
logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
|
|
||||||
} else {
|
|
||||||
if err := devices.deactivateDevice(devInfo); err != nil {
|
|
||||||
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}); err != nil && !os.IsNotExist(err) {
|
|
||||||
devices.Unlock()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
devices.Unlock()
|
devices.Unlock()
|
||||||
|
|
||||||
info, _ := devices.lookupDeviceWithLock("")
|
info, _ := devices.lookupDeviceWithLock("")
|
||||||
@ -2420,7 +2418,9 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo
|
|||||||
|
|
||||||
if fstype == xfs && devices.xfsNospaceRetries != "" {
|
if fstype == xfs && devices.xfsNospaceRetries != "" {
|
||||||
if err := devices.xfsSetNospaceRetries(info); err != nil {
|
if err := devices.xfsSetNospaceRetries(info); err != nil {
|
||||||
unix.Unmount(path, unix.MNT_DETACH)
|
if err := mount.Unmount(path); err != nil {
|
||||||
|
logrus.Warnf("devmapper.MountDevice cleanup error: %v", err)
|
||||||
|
}
|
||||||
devices.deactivateDevice(info)
|
devices.deactivateDevice(info)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -2446,11 +2446,23 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
|
|||||||
defer devices.Unlock()
|
defer devices.Unlock()
|
||||||
|
|
||||||
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
|
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
|
||||||
if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
|
if err := mount.Unmount(mountPath); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logrus.Debug("devmapper: Unmount done")
|
logrus.Debug("devmapper: Unmount done")
|
||||||
|
|
||||||
|
// Remove the mountpoint here. Removing the mountpoint (in newer kernels)
|
||||||
|
// will cause all other instances of this mount in other mount namespaces
|
||||||
|
// to be killed (this is an anti-DoS measure that is necessary for things
|
||||||
|
// like devicemapper). This is necessary to avoid cases where a libdm mount
|
||||||
|
// that is present in another namespace will cause subsequent RemoveDevice
|
||||||
|
// operations to fail. We ignore any errors here because this may fail on
|
||||||
|
// older kernels which don't have
|
||||||
|
// torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
|
||||||
|
if err := os.Remove(mountPath); err != nil {
|
||||||
|
logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
return devices.deactivateDevice(info)
|
return devices.deactivateDevice(info)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
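The recurring pattern in these hunks, replacing a bare `defer unix.Unmount(...)` with a deferred closure that at least logs the failure, reduced to a standalone sketch; cleanup below is a hypothetical stand-in for the mount helper, and the path is invented:

package main

import (
    "fmt"
    "log"
)

// cleanup is a hypothetical stand-in for mount.Unmount in the patch above.
func cleanup(target string) error {
    return fmt.Errorf("unmount %s: device busy", target)
}

func run() error {
    mnt := "/var/lib/containers/storage/devicemapper/mnt/example"
    // A plain `defer cleanup(mnt)` would silently discard the error;
    // wrapping it in a closure lets the failure at least be logged.
    defer func() {
        if err := cleanup(mnt); err != nil {
            log.Printf("cleanup error: %v", err)
        }
    }()
    return nil
}

func main() {
    if err := run(); err != nil {
        log.Fatal(err)
    }
}
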
32  vendor/github.com/containers/storage/drivers/devmapper/driver.go  generated  vendored
@@ -14,9 +14,9 @@ import (
     "github.com/containers/storage/pkg/idtools"
     "github.com/containers/storage/pkg/locker"
     "github.com/containers/storage/pkg/mount"
-    "github.com/containers/storage/pkg/system"
     units "github.com/docker/go-units"
     "github.com/sirupsen/logrus"
+    "golang.org/x/sys/unix"
 )
 
 func init() {
@@ -116,11 +116,13 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
 func (d *Driver) Cleanup() error {
     err := d.DeviceSet.Shutdown(d.home)
 
-    if err2 := mount.Unmount(d.home); err == nil {
-        err = err2
+    umountErr := mount.Unmount(d.home)
+    // in case we have two errors, prefer the one from Shutdown()
+    if err != nil {
+        return err
     }
 
-    return err
+    return umountErr
 }
 
 // CreateFromTemplate creates a layer with the same contents and parent as another layer.
@@ -148,7 +150,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
     return nil
 }
 
-// Remove removes a device with a given id, unmounts the filesystem.
+// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point.
 func (d *Driver) Remove(id string) error {
     d.locker.Lock(id)
     defer d.locker.Unlock(id)
@@ -163,7 +165,21 @@ func (d *Driver) Remove(id string) error {
     if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
         return fmt.Errorf("failed to remove device %s: %v", id, err)
     }
-    return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
+
+    // Most probably the mount point is already removed on Put()
+    // (see DeviceSet.UnmountDevice()), but just in case it was not
+    // let's try to remove it here as well, ignoring errors as
+    // an older kernel can return EBUSY if e.g. the mount was leaked
+    // to other mount namespaces. A failure to remove the container's
+    // mount point is not important and should not be treated
+    // as a failure to remove the container.
+    mp := path.Join(d.home, "mnt", id)
+    err := unix.Rmdir(mp)
+    if err != nil && !os.IsNotExist(err) {
+        logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err)
+    }
+
+    return nil
 }
 
 // Get mounts a device with given id into the root filesystem
@@ -226,10 +242,12 @@ func (d *Driver) Put(id string) error {
     if count := d.ctr.Decrement(mp); count > 0 {
         return nil
     }
+
     err := d.DeviceSet.UnmountDevice(id, mp)
     if err != nil {
-        logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
+        logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err)
     }
+
     return err
 }
 

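The reworked Cleanup() keeps both errors in view but prefers the one from Shutdown(). The same shape as a self-contained sketch, with shutdown and unmountHome as placeholder functions rather than the vendored calls:

package main

import (
    "errors"
    "fmt"
)

// shutdown and unmountHome are hypothetical placeholders for
// DeviceSet.Shutdown and mount.Unmount(d.home) in the vendored driver.
func shutdown() error    { return errors.New("shutdown failed") }
func unmountHome() error { return nil }

func cleanup() error {
    err := shutdown()

    umountErr := unmountHome()
    // in case we have two errors, prefer the one from shutdown()
    if err != nil {
        return err
    }
    return umountErr
}

func main() {
    fmt.Println(cleanup())
}
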
10  vendor/github.com/containers/storage/drivers/overlay/overlay.go  generated  vendored
@@ -75,7 +75,7 @@ const (
     maxDepth = 128
 
     // idLength represents the number of random characters
-    // which can be used to create the unique link identifer
+    // which can be used to create the unique link identifier
     // for every layer. If this value is too long then the
     // page size limit for the mount command may be exceeded.
     // The idLength should be selected such that following equation
@@ -219,7 +219,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
                 return nil, errors.Wrap(err, "error recording metacopy-being-used status")
             }
         } else {
-            logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
+            logrus.Infof("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
             return nil, err
         }
     }
@@ -280,7 +280,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
             trimkey = strings.TrimPrefix(trimkey, ".")
             switch trimkey {
             case "override_kernel_check":
-                logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
+                logrus.Debugf("overlay: override_kernel_check option was specified, but is no longer necessary")
             case "mountopt":
                 o.mountOptions = val
             case "size":
@@ -444,14 +444,14 @@ func (d *Driver) useNaiveDiff() bool {
             logrus.Debugf("cached value indicated that native-diff is usable")
         } else {
             logrus.Debugf("cached value indicated that native-diff is not being used")
-            logrus.Warn(nativeDiffCacheText)
+            logrus.Info(nativeDiffCacheText)
         }
         useNaiveDiffOnly = !nativeDiffCacheResult
         return
     }
     if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil {
         nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
-        logrus.Warn(nativeDiffCacheText)
+        logrus.Info(nativeDiffCacheText)
         useNaiveDiffOnly = true
     }
     cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)

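Most of the overlay hunks only lower log severity for conditions that are informational rather than actionable. With logrus the default level is Info, so anything demoted to Debug disappears from normal output unless the caller raises the level, as this small sketch shows:

package main

import "github.com/sirupsen/logrus"

func main() {
    logrus.Warn("shown at the default Info level")
    logrus.Info("also shown at the default Info level")
    logrus.Debug("hidden unless the level is raised")

    logrus.SetLevel(logrus.DebugLevel)
    logrus.Debug("now visible")
}
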
2  vendor/github.com/containers/storage/drivers/zfs/zfs.go  generated  vendored
@@ -160,7 +160,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
             continue // may fail on fuse file systems
         }
 
-        if stat.Dev == wantedDev && m.Fstype == "zfs" {
+        if stat.Dev == wantedDev && m.FSType == "zfs" {
             return m.Source, nil
         }
     }

10  vendor/github.com/containers/storage/go.mod  generated  vendored
@@ -1,4 +1,4 @@
-go 1.15
+go 1.14
 
 module github.com/containers/storage
 
@@ -8,23 +8,23 @@ require (
 	github.com/Microsoft/hcsshim v0.8.9
 	github.com/docker/go-units v0.4.0
 	github.com/hashicorp/go-multierror v1.1.0
-	github.com/klauspost/compress v1.11.0
+	github.com/klauspost/compress v1.11.1
 	github.com/klauspost/pgzip v1.2.5
 	github.com/mattn/go-shellwords v1.0.10
 	github.com/mistifyio/go-zfs v2.1.1+incompatible
-	github.com/moby/sys/mountinfo v0.1.3
+	github.com/moby/sys/mountinfo v0.4.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/runc v1.0.0-rc91
 	github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
 	github.com/opencontainers/selinux v1.6.0
 	github.com/pkg/errors v0.9.1
 	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
-	github.com/sirupsen/logrus v1.6.0
+	github.com/sirupsen/logrus v1.7.0
 	github.com/stretchr/testify v1.6.1
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/vbatts/tar-split v0.11.1
 	golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
-	golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+	golang.org/x/sys v0.0.0-20200909081042-eff7692f9009
 	gotest.tools v2.2.0+incompatible
 )

11  vendor/github.com/containers/storage/go.sum  generated  vendored
@@ -62,8 +62,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
-github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.1 h1:bPb7nMRdOZYDrpPMTA3EInUQrdgoBinqUuSwlGdKDdE=
+github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -80,6 +80,8 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd
 github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
 github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
+github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
 github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -113,6 +115,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -163,12 +167,15 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
 golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
 golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 h1:W0lCpv29Hv0UaM1LXb9QlBHLNP8UFfcKjblhVCWftOM=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

4  vendor/github.com/containers/storage/layers.go  generated  vendored
@@ -907,11 +907,11 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
     for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
         st, err := system.Stat(dir)
         if err != nil {
-            return nil, nil, errors.Wrapf(err, "error reading ownership of directory %q", dir)
+            return nil, nil, errors.Wrap(err, "read directory ownership")
         }
         lst, err := system.Lstat(dir)
         if err != nil {
-            return nil, nil, errors.Wrapf(err, "error reading ownership of directory-in-case-it's-a-symlink %q", dir)
+            return nil, nil, err
         }
         fsuid := int(st.UID())
         fsgid := int(st.GID())

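For context on the shorter messages: errors.Wrap from github.com/pkg/errors preserves the original error, and a Stat failure is already a *os.PathError naming the path, so the wrap text can stay terse without losing information. A small sketch with an invented path:

package main

import (
    "fmt"
    "os"

    "github.com/pkg/errors"
)

func ownerOf(dir string) error {
    if _, err := os.Stat(dir); err != nil {
        // The wrapped *os.PathError already carries the path, so a short
        // action phrase is enough here.
        return errors.Wrap(err, "read directory ownership")
    }
    return nil
}

func main() {
    fmt.Println(ownerOf("/no/such/dir"))
    // read directory ownership: stat /no/such/dir: no such file or directory
}
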
48  vendor/github.com/containers/storage/pkg/archive/archive.go  generated  vendored
@@ -694,29 +694,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
         }
     }
 
-    var errors []string
-    for key, value := range hdr.Xattrs {
-        if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
-            if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) {
-                // We ignore errors here because not all graphdrivers support
-                // xattrs *cough* old versions of AUFS *cough*. However only
-                // ENOTSUP should be emitted in that case, otherwise we still
-                // bail. We also ignore EPERM errors if we are running in a
-                // user namespace.
-                errors = append(errors, err.Error())
-                continue
-            }
-            return err
-        }
-
-    }
-
-    if len(errors) > 0 {
-        logrus.WithFields(logrus.Fields{
-            "errors": errors,
-        }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
-    }
-
     // There is no LChmod, so ignore mode for symlink. Also, this
     // must happen after chown, as that can modify the file mode
     if err := handleLChmod(hdr, path, hdrInfo); err != nil {
@@ -746,6 +723,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
             return err
         }
     }
+
+    var errors []string
+    for key, value := range hdr.Xattrs {
+        if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+            if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) {
+                // We ignore errors here because not all graphdrivers support
+                // xattrs *cough* old versions of AUFS *cough*. However only
+                // ENOTSUP should be emitted in that case, otherwise we still
+                // bail. We also ignore EPERM errors if we are running in a
+                // user namespace.
+                errors = append(errors, err.Error())
+                continue
+            }
+            return err
+        }
+
+    }
+
+    if len(errors) > 0 {
+        logrus.WithFields(logrus.Fields{
+            "errors": errors,
+        }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+    }
+
     return nil
 }
 
@@ -1251,6 +1252,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
         GIDMaps:   archiver.UntarIDMappings.GIDs(),
         ChownOpts: archiver.ChownOpts,
         InUserNS:  rsystem.RunningInUserNS(),
+        NoOverwriteDirNonDir: true,
     }
     err = archiver.Untar(r, filepath.Dir(dst), options)
     if err != nil {

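A standalone illustration of the tolerant xattr handling this block relocates: ENOTSUP from a filesystem without xattr support, or EPERM inside a user namespace, is recorded and skipped rather than treated as fatal. This Linux-only sketch uses golang.org/x/sys/unix directly instead of the internal system package, and a throwaway attribute on /tmp:

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    // Best-effort xattr write: tolerate filesystems or namespaces that refuse it.
    err := unix.Lsetxattr("/tmp", "user.example", []byte("value"), 0)
    switch err {
    case nil:
        fmt.Println("xattr set")
    case unix.ENOTSUP, unix.EPERM:
        fmt.Println("ignored:", err) // same class of errors the archive code collects
    default:
        fmt.Println("unexpected:", err)
    }
}
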
2  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go  generated  vendored
@@ -12,7 +12,7 @@ import (
 )
 
 // DevmapperLogger defines methods required to register as a callback for
-// logging events recieved from devicemapper. Note that devicemapper will send
+// logging events received from devicemapper. Note that devicemapper will send
 // *all* logs regardless to callbacks (including debug logs) so it's
 // recommended to not spam the console with the outputs.
 type DevmapperLogger interface {

2  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go  generated  vendored
@@ -13,7 +13,7 @@ import (
     "github.com/sirupsen/logrus"
 )
 
-// PatternMatcher allows checking paths agaist a list of patterns
+// PatternMatcher allows checking paths against a list of patterns
 type PatternMatcher struct {
     patterns   []*Pattern
     exclusions bool

2  vendor/github.com/containers/storage/pkg/idtools/idtools.go  generated  vendored
@@ -291,7 +291,7 @@ func parseSubidFile(path, username string) (ranges, error) {
 
 func checkChownErr(err error, name string, uid, gid int) error {
     if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
-        return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name)
+        return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
     }
     return err
 }

14  vendor/github.com/containers/storage/pkg/idtools/parser.go  generated  vendored
@@ -8,13 +8,6 @@ import (
     "strings"
 )
 
-func nonDigitsToWhitespace(r rune) rune {
-    if !strings.ContainsRune("0123456789", r) {
-        return ' '
-    }
-    return r
-}
-
 func parseTriple(spec []string) (container, host, size uint32, err error) {
     cid, err := strconv.ParseUint(spec[0], 10, 32)
     if err != nil {
@@ -33,9 +26,12 @@ func parseTriple(spec []string) (container, host, size uint32, err error) {
 
 // ParseIDMap parses idmap triples from string.
 func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
-    stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+    stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
     for _, idMapSpec := range mapSpec {
-        idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
+        if idMapSpec == "" {
+            continue
+        }
+        idSpec := strings.Split(idMapSpec, ":")
         if len(idSpec)%3 != 0 {
             return nil, stdErr
         }

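The parsing change is easier to see outside the diff: mappings are now split on ":" and must be well-formed uint32 triples, instead of having every non-digit squashed to whitespace. parseOne below is a simplified stand-in for illustration, not the vendored function:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseOne parses a single "container:host:size" triple the way the new
// ParseIDMap splits its input.
func parseOne(spec string) (container, host, size uint32, err error) {
    parts := strings.Split(spec, ":")
    if len(parts) != 3 {
        return 0, 0, 0, fmt.Errorf("malformed triple %q, expected uint32:uint32:uint32", spec)
    }
    var vals [3]uint32
    for i, p := range parts {
        v, err := strconv.ParseUint(p, 10, 32)
        if err != nil {
            return 0, 0, 0, err
        }
        vals[i] = uint32(v)
    }
    return vals[0], vals[1], vals[2], nil
}

func main() {
    fmt.Println(parseOne("0:100000:65536")) // 0 100000 65536 <nil>
    _, _, _, err := parseOne("0 100000 65536")
    fmt.Println(err) // rejected now that splitting is on ":"
}
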
2  vendor/github.com/containers/storage/pkg/mount/mount.go  generated  vendored
@@ -90,7 +90,7 @@ func RecursiveUnmount(target string) error {
         if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
             return err
             // Ignore errors for submounts and continue trying to unmount others
-            // The final unmount should fail if there ane any submounts remaining
+            // The final unmount should fail if there are any submounts remaining
         }
     }
     return nil

12  vendor/github.com/containers/storage/pkg/mount/mountinfo.go  generated  vendored
@@ -1,21 +1,13 @@
 package mount
 
 import (
-    "github.com/containers/storage/pkg/fileutils"
     "github.com/moby/sys/mountinfo"
 )
 
 type Info = mountinfo.Info
 
+var Mounted = mountinfo.Mounted
+
 func GetMounts() ([]*Info, error) {
     return mountinfo.GetMounts(nil)
 }
-
-// Mounted determines if a specified mountpoint has been mounted.
-func Mounted(mountpoint string) (bool, error) {
-    mountpoint, err := fileutils.ReadSymlinkedPath(mountpoint)
-    if err != nil {
-        return false, err
-    }
-    return mountinfo.Mounted(mountpoint)
-}

2  vendor/github.com/containers/storage/storage.conf  generated  vendored
@@ -47,7 +47,7 @@ additionalimagestores = [
 # remap-group = "containers"
 
 # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
-# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partioned
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
 # to containers configured to create automatically a user namespace. Containers
 # configured to automatically create a user namespace can still overlap with containers
 # having an explicit mapping set.

39  vendor/github.com/containers/storage/store.go  generated  vendored
@@ -613,14 +613,14 @@ func GetStore(options StoreOptions) (Store, error) {
     if options.GraphRoot != "" {
         dir, err := filepath.Abs(options.GraphRoot)
         if err != nil {
-            return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
+            return nil, err
         }
         options.GraphRoot = dir
     }
     if options.RunRoot != "" {
         dir, err := filepath.Abs(options.RunRoot)
         if err != nil {
-            return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
+            return nil, err
         }
         options.RunRoot = dir
     }
@@ -2677,21 +2677,16 @@ func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (st
 }
 
 func (s *store) Mount(id, mountLabel string) (string, error) {
-    container, err := s.Container(id)
-    var (
-        uidMap, gidMap []idtools.IDMap
-        mountOpts      []string
-    )
-    if err == nil {
-        uidMap, gidMap = container.UIDMap, container.GIDMap
-        id = container.LayerID
-        mountOpts = container.MountOpts()
-    }
     options := drivers.MountOpts{
         MountLabel: mountLabel,
-        UidMaps:    uidMap,
-        GidMaps:    gidMap,
-        Options:    mountOpts,
+    }
+    // check if `id` is a container, then grab the LayerID, uidmap and gidmap, along with
+    // otherwise we assume the id is a LayerID and attempt to mount it.
+    if container, err := s.Container(id); err == nil {
+        id = container.LayerID
+        options.UidMaps = container.UIDMap
+        options.GidMaps = container.GIDMap
+        options.Options = container.MountOpts()
     }
     return s.mount(id, options)
 }
@@ -3460,7 +3455,10 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
 }
 
 // defaultConfigFile path to the system wide storage.conf file
-var defaultConfigFile = "/etc/containers/storage.conf"
+var (
+    defaultConfigFile    = "/etc/containers/storage.conf"
+    defaultConfigFileSet = false
+)
 
 // AutoUserNsMinSize is the minimum size for automatically created user namespaces
 const AutoUserNsMinSize = 1024
@@ -3475,11 +3473,16 @@ const RootAutoUserNsUser = "containers"
 // SetDefaultConfigFilePath sets the default configuration to the specified path
 func SetDefaultConfigFilePath(path string) {
     defaultConfigFile = path
+    defaultConfigFileSet = true
+    reloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
 }
 
 // DefaultConfigFile returns the path to the storage config file used
 func DefaultConfigFile(rootless bool) (string, error) {
-    if rootless {
+    if defaultConfigFileSet || !rootless {
+        return defaultConfigFile, nil
+    }
+
     if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
         return filepath.Join(configHome, "containers/storage.conf"), nil
     }
@@ -3489,8 +3492,6 @@ func DefaultConfigFile(rootless bool) (string, error) {
     }
     return filepath.Join(home, ".config/containers/storage.conf"), nil
 }
-    return defaultConfigFile, nil
-}
 
 // TOML-friendly explicit tables used for conversions.
 type tomlConfig struct {

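A consumer-side sketch of the new configuration behaviour: once SetDefaultConfigFilePath is called, that path wins for rootful and rootless callers alike, and the default store options are reloaded from it immediately. This assumes the bumped containers/storage is in use, and the path is only an example:

package main

import (
    "fmt"

    "github.com/containers/storage"
)

func main() {
    // Before pinning a path, a rootless caller gets a per-user location such as
    // $XDG_CONFIG_HOME/containers/storage.conf.
    p, _ := storage.DefaultConfigFile(true)
    fmt.Println("rootless default:", p)

    // After pinning, DefaultConfigFile returns the pinned path unconditionally.
    storage.SetDefaultConfigFilePath("/etc/containers/storage.conf")
    p, _ = storage.DefaultConfigFile(true)
    fmt.Println("pinned:", p)
}
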
46  vendor/github.com/containers/storage/utils.go  generated  vendored
@@ -76,7 +76,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
     }
     path = filepath.Join(path, "containers")
     if err := os.MkdirAll(path, 0700); err != nil {
-        return "", errors.Wrapf(err, "unable to make rootless runtime dir %s", path)
+        return "", errors.Wrapf(err, "unable to make rootless runtime")
     }
     return path, nil
 }
@@ -154,7 +154,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
     }
     resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
     if err != nil {
-        return "", errors.Wrapf(err, "cannot resolve %s", homeDir)
+        return "", err
     }
     return filepath.Join(resolvedHomeDir, "rundir"), nil
 }
@@ -190,7 +190,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
     // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
     resolvedHome, err := filepath.EvalSymlinks(home)
     if err != nil {
-        return "", "", errors.Wrapf(err, "cannot resolve %s", home)
+        return "", "", err
     }
     dataDir = filepath.Join(resolvedHome, ".local", "share")
 
@@ -206,11 +206,10 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
         return opts, err
     }
     opts.RunRoot = rootlessRuntime
-    opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
     if systemOpts.RootlessStoragePath != "" {
-        opts.RootlessStoragePath = systemOpts.RootlessStoragePath
+        opts.GraphRoot = systemOpts.RootlessStoragePath
     } else {
-        opts.RootlessStoragePath = opts.GraphRoot
+        opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
     }
     if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
         opts.GraphDriverName = "overlay"
@@ -259,13 +258,24 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
     }
     _, err = os.Stat(storageConf)
     if err != nil && !os.IsNotExist(err) {
-        return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
+        return storageOpts, err
     }
-    if err == nil {
+    if err == nil && !defaultConfigFileSet {
         defaultRootlessRunRoot = storageOpts.RunRoot
         defaultRootlessGraphRoot = storageOpts.GraphRoot
         storageOpts = StoreOptions{}
         reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
+        if rootless && rootlessUID != 0 {
+            // If the file did not specify a graphroot or runroot,
+            // set sane defaults so we don't try and use root-owned
+            // directories
+            if storageOpts.RunRoot == "" {
+                storageOpts.RunRoot = defaultRootlessRunRoot
+            }
+            if storageOpts.GraphRoot == "" {
+                storageOpts.GraphRoot = defaultRootlessGraphRoot
+            }
+        }
     }
     if storageOpts.RunRoot != "" {
         runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
@@ -282,26 +292,6 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
         storageOpts.GraphRoot = graphRoot
     }
 
-    if rootless && rootlessUID != 0 {
-        if err == nil {
-            // If the file did not specify a graphroot or runroot,
-            // set sane defaults so we don't try and use root-owned
-            // directories
-            if storageOpts.RunRoot == "" {
-                storageOpts.RunRoot = defaultRootlessRunRoot
-            }
-            if storageOpts.GraphRoot == "" {
-                storageOpts.GraphRoot = defaultRootlessGraphRoot
-            }
-            if storageOpts.RootlessStoragePath != "" {
-                rootlessStoragePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID)
-                if err != nil {
-                    return storageOpts, err
-                }
-                storageOpts.GraphRoot = rootlessStoragePath
-            }
-        }
-    }
     return storageOpts, nil
 }
 

1  vendor/github.com/klauspost/compress/zstd/encoder_options.go  generated  vendored
@@ -36,6 +36,7 @@ func (o *encoderOptions) setDefault() {
         blockSize:  1 << 16,
         windowSize: 8 << 20,
         level:      SpeedDefault,
+        allLitEntropy: true,
     }
 }
 

15  vendor/github.com/moby/sys/mountinfo/doc.go  generated  vendored
@@ -1,5 +1,6 @@
 // Package mountinfo provides a set of functions to retrieve information about OS mounts.
-// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD,
+//
+// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD and OpenBSD,
 // and a shallow implementation for Windows, but in general this is Linux-only package, so
 // the rest of the document only applies to Linux, unless explicitly specified otherwise.
 //
@@ -8,17 +9,14 @@
 // see different mounts. A per-process mountinfo table is available from /proc/<PID>/mountinfo,
 // where <PID> is a numerical process identifier.
 //
-// In general, /proc is not a very effective interface, and mountinfo is not an exception.
+// In general, /proc is not a very efficient interface, and mountinfo is not an exception.
 // For example, there is no way to get information about a specific mount point (i.e. it
 // is all-or-nothing). This package tries to hide the /proc ineffectiveness by using
 // parse filters while reading mountinfo. A filter can skip some entries, or stop
 // processing the rest of the file once the needed information is found.
 //
-// For mountinfo filters that accept path as an argument, the path must be:
-// - absolute;
-// - having all symlinks resolved;
-// - being cleaned.
-//
+// For mountinfo filters that accept path as an argument, the path must be absolute,
+// having all symlinks resolved, and being cleaned (i.e. no extra slashes or dots).
 // One way to achieve all of the above is to employ filepath.Abs followed by
 // filepath.EvalSymlinks (the latter calls filepath.Clean on the result so
 // there is no need to explicitly call filepath.Clean).
@@ -27,7 +25,7 @@
 // of the cases where mountinfo should not be parsed:
 //
 // 1. Before performing a mount. Usually, this is not needed, but if required (say to
-//    prevent overmounts), to check whether a directory is mounted, call os.Lstat
+//    prevent over-mounts), to check whether a directory is mounted, call os.Lstat
 //    on it and its parent directory, and compare their st.Sys().(*syscall.Stat_t).Dev
 //    fields -- if they differ, then the directory is the mount point. NOTE this does
 //    not work for bind mounts. Optionally, the filesystem type can also be checked
@@ -43,5 +41,4 @@
 //
 // 5. To find the mount point root of a specific directory. You can perform os.Stat()
 //    on the directory and traverse up until the Dev field of a parent directory differs.
-
 package mountinfo

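The path rules restated above (absolute, symlinks resolved, clean) can be met exactly as the package documentation suggests, with filepath.Abs followed by filepath.EvalSymlinks, before handing the path to Mounted or to a filter. A Linux-only sketch:

package main

import (
    "fmt"
    "path/filepath"

    "github.com/moby/sys/mountinfo"
)

func main() {
    // filepath.EvalSymlinks also cleans the result, so no separate
    // filepath.Clean call is needed, as the package documentation notes.
    abs, err := filepath.Abs("/var/run")
    if err != nil {
        panic(err)
    }
    resolved, err := filepath.EvalSymlinks(abs)
    if err != nil {
        panic(err)
    }
    mounted, err := mountinfo.Mounted(resolved)
    fmt.Println(resolved, mounted, err)
}
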
2  vendor/github.com/moby/sys/mountinfo/mounted_unix.go  generated  vendored
@@ -1,4 +1,4 @@
-// +build linux freebsd,cgo
+// +build linux freebsd,cgo openbsd,cgo
 
 package mountinfo
 

21  vendor/github.com/moby/sys/mountinfo/mountinfo.go  generated  vendored
@@ -1,7 +1,6 @@
 package mountinfo
 
 import (
-    "io"
     "os"
 )
 
@@ -11,14 +10,6 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
     return parseMountTable(f)
 }
 
-// GetMountsFromReader retrieves a list of mounts from the
-// reader provided, with an optional filter applied (use nil
-// for no filter). This can be useful in tests or benchmarks
-// that provide a fake mountinfo data.
-func GetMountsFromReader(reader io.Reader, f FilterFunc) ([]*Info, error) {
-    return parseInfoFile(reader, f)
-}
-
 // Mounted determines if a specified path is a mount point.
 //
 // The argument must be an absolute path, with all symlinks resolved, and clean.
@@ -55,18 +46,18 @@ type Info struct {
     // Mountpoint indicates the mount point relative to the process's root.
     Mountpoint string
 
-    // Opts represents mount-specific options.
-    Opts string
+    // Options represents mount-specific options.
+    Options string
 
     // Optional represents optional fields.
     Optional string
 
-    // Fstype indicates the type of filesystem, such as EXT3.
-    Fstype string
+    // FSType indicates the type of filesystem, such as EXT3.
+    FSType string
 
     // Source indicates filesystem specific information or "none".
     Source string
 
-    // VfsOpts represents per super block options.
-    VfsOpts string
+    // VFSOptions represents per super block options.
+    VFSOptions string
 }

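Callers of the vendored mountinfo (for instance the zfs driver hunk earlier in this diff) now read the renamed fields. A short sketch against moby/sys/mountinfo v0.4.0:

package main

import (
    "fmt"

    "github.com/moby/sys/mountinfo"
)

func main() {
    mounts, err := mountinfo.GetMounts(nil) // nil: no filter
    if err != nil {
        panic(err)
    }
    for _, m := range mounts {
        // Options, FSType and VFSOptions replace the old Opts, Fstype and VfsOpts names.
        fmt.Printf("%s on %s type %s (%s; %s)\n",
            m.Source, m.Mountpoint, m.FSType, m.Options, m.VFSOptions)
    }
}
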
@@ -1,3 +1,5 @@
+// +build freebsd,cgo openbsd,cgo
+
 package mountinfo
 
 /*
@@ -33,7 +35,7 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
         var mountinfo Info
         var skip, stop bool
         mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
-        mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+        mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
         mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
 
         if filter != nil {
@@ -54,7 +56,7 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
 
 func mounted(path string) (bool, error) {
     // Fast path: compare st.st_dev fields.
-    // This should always work for FreeBSD.
+    // This should always work for FreeBSD and OpenBSD.
     mounted, err := mountedByStat(path)
     if err == nil {
         return mounted, nil

22
vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
generated
vendored
22
vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
generated
vendored
@@ -6,12 +6,12 @@ import "strings"
 // used to filter out mountinfo entries we're not interested in,
 // and/or stop further processing if we found what we wanted.
 //
-// It takes a pointer to the Info struct (not fully populated,
-// currently only Mountpoint, Fstype, Source, and (on Linux)
-// VfsOpts are filled in), and returns two booleans:
+// It takes a pointer to the Info struct (fully populated with all available
+// fields on the GOOS platform), and returns two booleans:
 //
-// - skip: true if the entry should be skipped
-// - stop: true if parsing should be stopped after the entry
+// skip: true if the entry should be skipped;
+//
+// stop: true if parsing should be stopped after the entry.
 type FilterFunc func(*Info) (skip, stop bool)

 // PrefixFilter discards all entries whose mount points
@@ -36,8 +36,8 @@ func SingleEntryFilter(mp string) FilterFunc {
 // ParentsFilter returns all entries whose mount points
 // can be parents of a path specified, discarding others.
 //
-// For example, given `/var/lib/docker/something`, entries
-// like `/var/lib/docker`, `/var` and `/` are returned.
+// For example, given /var/lib/docker/something, entries
+// like /var/lib/docker, /var and / are returned.
 func ParentsFilter(path string) FilterFunc {
 	return func(m *Info) (bool, bool) {
 		skip := !strings.HasPrefix(path, m.Mountpoint)
@@ -45,12 +45,12 @@ func ParentsFilter(path string) FilterFunc {
 	}
 }

-// FstypeFilter returns all entries that match provided fstype(s).
-func FstypeFilter(fstype ...string) FilterFunc {
+// FSTypeFilter returns all entries that match provided fstype(s).
+func FSTypeFilter(fstype ...string) FilterFunc {
 	return func(m *Info) (bool, bool) {
 		for _, t := range fstype {
-			if m.Fstype == t {
-				return false, false // don't skeep, keep going
+			if m.FSType == t {
+				return false, false // don't skip, keep going
 			}
 		}
 		return true, false // skip, keep going
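To illustrate the renamed filter API, here is a hedged sketch combining FSTypeFilter (formerly FstypeFilter) with a hand-written FilterFunc matching the signature above; the filesystem type and paths are made up for the example, and GetMounts is assumed to be the package's Linux entry point referenced later in this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// FSTypeFilter keeps only entries whose FSType matches one of the
	// provided values.
	overlays, err := mountinfo.GetMounts(mountinfo.FSTypeFilter("overlay"))
	if err != nil {
		panic(err)
	}
	fmt.Println("overlay mounts:", len(overlays))

	// A custom FilterFunc: keep only entries under /sys, and stop
	// scanning once the /sys entry itself has been handled.
	underSys := func(m *mountinfo.Info) (skip, stop bool) {
		skip = !strings.HasPrefix(m.Mountpoint, "/sys")
		stop = m.Mountpoint == "/sys"
		return skip, stop
	}
	sysMounts, err := mountinfo.GetMounts(underSys)
	if err != nil {
		panic(err)
	}
	for _, m := range sysMounts {
		fmt.Println(m.Mountpoint, m.FSType)
	}
}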
64 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go generated vendored
@@ -1,5 +1,3 @@
-// +build go1.13
-
 package mountinfo

 import (
@@ -11,14 +9,18 @@ import (
 	"strings"
 )

-func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
+// GetMountsFromReader retrieves a list of mounts from the
+// reader provided, with an optional filter applied (use nil
+// for no filter). This can be useful in tests or benchmarks
+// that provide a fake mountinfo data.
+//
+// This function is Linux-specific.
+func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) {
 	s := bufio.NewScanner(r)
 	out := []*Info{}
-	var err error
 	for s.Scan() {
-		if err = s.Err(); err != nil {
-			return nil, err
-		}
+		var err error
 		/*
 		   See http://man7.org/linux/man-pages/man5/proc.5.html

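The new doc comment points at tests and benchmarks that feed fake mountinfo data; a small sketch of that use, with a made-up but well-formed two-line mountinfo fixture (not taken from this repository):

package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/mountinfo"
)

// fakeMountinfo imitates two /proc/<pid>/mountinfo lines for testing.
const fakeMountinfo = `22 26 0:21 / /sys rw,nosuid,nodev,noexec,relatime shared:2 - sysfs sysfs rw
26 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro
`

func main() {
	mounts, err := mountinfo.GetMountsFromReader(strings.NewReader(fakeMountinfo), nil)
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		// Mountpoint, FSType and Options are parsed from the fixture above.
		fmt.Println(m.Mountpoint, m.FSType, m.Options)
	}
}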
@@ -70,12 +72,11 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {

 		p := &Info{}

-		// Fill in the fields that a filter might check
 		p.Mountpoint, err = unescape(fields[4])
 		if err != nil {
 			return nil, fmt.Errorf("Parsing '%s' failed: mount point: %w", fields[4], err)
 		}
-		p.Fstype, err = unescape(fields[sepIdx+1])
+		p.FSType, err = unescape(fields[sepIdx+1])
 		if err != nil {
 			return nil, fmt.Errorf("Parsing '%s' failed: fstype: %w", fields[sepIdx+1], err)
 		}
@@ -83,19 +84,7 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
 		if err != nil {
 			return nil, fmt.Errorf("Parsing '%s' failed: source: %w", fields[sepIdx+2], err)
 		}
-		p.VfsOpts = fields[sepIdx+3]
+		p.VFSOptions = fields[sepIdx+3]

-		// Run a filter soon so we can skip parsing/adding entries
-		// the caller is not interested in
-		var skip, stop bool
-		if filter != nil {
-			skip, stop = filter(p)
-			if skip {
-				continue
-			}
-		}
-
-		// Fill in the rest of the fields
-
 		// ignore any numbers parsing errors, as there should not be any
 		p.ID, _ = strconv.Atoi(fields[0])
@@ -112,7 +101,7 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
 			return nil, fmt.Errorf("Parsing '%s' failed: root: %w", fields[3], err)
 		}

-		p.Opts = fields[5]
+		p.Options = fields[5]

 		// zero or more optional fields
 		switch {
@@ -124,11 +113,23 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
 			p.Optional = strings.Join(fields[6:sepIdx-1], " ")
 		}

+		// Run the filter after parsing all of the fields.
+		var skip, stop bool
+		if filter != nil {
+			skip, stop = filter(p)
+			if skip {
+				continue
+			}
+		}
+
 		out = append(out, p)
 		if stop {
 			break
 		}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 	return out, nil
 }

@@ -141,12 +142,17 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
 	}
 	defer f.Close()

-	return parseInfoFile(f, filter)
+	return GetMountsFromReader(f, filter)
 }

-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
+// PidMountInfo retrieves the list of mounts from a given process' mount
+// namespace. Unless there is a need to get mounts from a mount namespace
+// different from that of a calling process, use GetMounts.
+//
+// This function is Linux-specific.
+//
+// Deprecated: this will be removed before v1; use GetMountsFromReader with
+// opened /proc/<pid>/mountinfo as an argument instead.
 func PidMountInfo(pid int) ([]*Info, error) {
 	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
 	if err != nil {
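The deprecation note above suggests opening /proc/<pid>/mountinfo directly and handing it to GetMountsFromReader; a hedged sketch of that migration, where the helper name mountsForPid and the example PID are ours, not the library's:

package main

import (
	"fmt"
	"os"

	"github.com/moby/sys/mountinfo"
)

// mountsForPid is the suggested replacement for the deprecated
// PidMountInfo: open the process' mountinfo file and parse it.
func mountsForPid(pid int) ([]*mountinfo.Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return mountinfo.GetMountsFromReader(f, nil)
}

func main() {
	// PID 1 is just an example; reading another process' mountinfo
	// may require elevated privileges.
	mounts, err := mountsForPid(1)
	if err != nil {
		panic(err)
	}
	fmt.Println("mounts in PID 1's namespace:", len(mounts))
}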
@@ -154,7 +160,7 @@ func PidMountInfo(pid int) ([]*Info, error) {
 	}
 	defer f.Close()

-	return parseInfoFile(f, nil)
+	return GetMountsFromReader(f, nil)
 }

 // A few specific characters in mountinfo path entries (root and mountpoint)
@@ -173,7 +179,7 @@ func unescape(path string) (string, error) {
 	}

 	// The following code is UTF-8 transparent as it only looks for some
-	// specific characters (backslach and 0..7) with values < utf8.RuneSelf,
+	// specific characters (backslash and 0..7) with values < utf8.RuneSelf,
 	// and everything else is passed through as is.
 	buf := make([]byte, len(path))
 	bufLen := 0
7 vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go generated vendored
@@ -1,10 +1,9 @@
-// +build !windows,!linux,!freebsd freebsd,!cgo
+// +build !windows,!linux,!freebsd,!openbsd freebsd,!cgo openbsd,!cgo

 package mountinfo

 import (
 	"fmt"
-	"io"
 	"runtime"
 )

@@ -14,10 +13,6 @@ func parseMountTable(_ FilterFunc) ([]*Info, error) {
 	return nil, errNotImplemented
 }

-func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
-	return parseMountTable(f)
-}
-
 func mounted(path string) (bool, error) {
 	return false, errNotImplemented
 }
6 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go generated vendored
@@ -1,16 +1,10 @@
 package mountinfo

-import "io"
-
 func parseMountTable(_ FilterFunc) ([]*Info, error) {
 	// Do NOT return an error!
 	return nil, nil
 }

-func parseInfoFile(_ io.Reader, f FilterFunc) ([]*Info, error) {
-	return parseMountTable(f)
-}
-
 func mounted(_ string) (bool, error) {
 	return false, nil
 }
11 vendor/modules.txt vendored
@@ -86,7 +86,7 @@ github.com/containers/buildah/pkg/secrets
 github.com/containers/buildah/pkg/supplemented
 github.com/containers/buildah/pkg/umask
 github.com/containers/buildah/util
-# github.com/containers/common v0.26.0
+# github.com/containers/common v0.26.3
 github.com/containers/common/pkg/apparmor
 github.com/containers/common/pkg/apparmor/internal/supported
 github.com/containers/common/pkg/auth
@@ -102,7 +102,7 @@ github.com/containers/common/pkg/sysinfo
 github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.6.0
+# github.com/containers/image/v5 v5.7.0
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -117,6 +117,7 @@ github.com/containers/image/v5/image
 github.com/containers/image/v5/internal/iolimits
 github.com/containers/image/v5/internal/pkg/keyctl
 github.com/containers/image/v5/internal/pkg/platform
+github.com/containers/image/v5/internal/rootless
 github.com/containers/image/v5/internal/tmpdir
 github.com/containers/image/v5/internal/uploadreader
 github.com/containers/image/v5/manifest
@@ -164,7 +165,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.23.5
+# github.com/containers/storage v1.23.7
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -332,7 +333,7 @@ github.com/inconshreveable/mousetrap
 github.com/ishidawataru/sctp
 # github.com/json-iterator/go v1.1.10
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.11.0
+# github.com/klauspost/compress v1.11.1
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -351,7 +352,7 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/mistifyio/go-zfs
 # github.com/moby/sys/mount v0.1.1
 github.com/moby/sys/mount
-# github.com/moby/sys/mountinfo v0.2.0
+# github.com/moby/sys/mountinfo v0.4.0
 github.com/moby/sys/mountinfo
 # github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2
 github.com/moby/term