Mirror of https://github.com/containers/podman.git (synced 2025-06-23 02:18:13 +08:00)

Merge pull request #18999 from Luap99/vendor
update c/{common,image,storage,buildah} to latest
@@ -8,6 +8,7 @@ import (
 	"strconv"
 	"strings"

+	"github.com/containers/buildah/pkg/cli"
 	"github.com/containers/common/pkg/config"
 	cutil "github.com/containers/common/pkg/util"
 	"github.com/containers/image/v5/transports/alltransports"
@@ -346,7 +347,7 @@ func PullImage(imageName string, cliVals *entities.ContainerCreateOptions) (stri
 		skipTLSVerify = types.NewOptionalBool(!cliVals.TLSVerify.Value())
 	}

-	decConfig, err := util.DecryptConfig(cliVals.DecryptionKeys)
+	decConfig, err := cli.DecryptConfig(cliVals.DecryptionKeys)
 	if err != nil {
 		return "unable to obtain decryption config", err
 	}
@@ -6,6 +6,7 @@ import (
 	"os"
 	"strings"

+	"github.com/containers/buildah/pkg/cli"
 	"github.com/containers/common/pkg/auth"
 	"github.com/containers/common/pkg/completion"
 	"github.com/containers/image/v5/types"
@@ -164,7 +165,7 @@ func imagePull(cmd *cobra.Command, args []string) error {
 		pullOptions.Password = creds.Password
 	}

-	decConfig, err := util.DecryptConfig(pullOptions.DecryptionKeys)
+	decConfig, err := cli.DecryptConfig(pullOptions.DecryptionKeys)
 	if err != nil {
 		return fmt.Errorf("unable to obtain decryption config: %w", err)
 	}
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"

+	"github.com/containers/buildah/pkg/cli"
 	"github.com/containers/common/pkg/auth"
 	"github.com/containers/common/pkg/completion"
 	"github.com/containers/image/v5/types"
@@ -198,7 +199,7 @@ func imagePush(cmd *cobra.Command, args []string) error {
 	}
 	defer signingCleanup()

-	encConfig, encLayers, err := util.EncryptConfig(pushOptions.EncryptionKeys, pushOptions.EncryptLayers)
+	encConfig, encLayers, err := cli.EncryptConfig(pushOptions.EncryptionKeys, pushOptions.EncryptLayers)
 	if err != nil {
 		return fmt.Errorf("unable to obtain encryption config: %w", err)
 	}
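The pull and push call sites above switch from podman's own pkg/util helpers to the equivalents that now live in buildah's pkg/cli package (added further down in this diff). A minimal sketch of that call pattern follows; it is not part of the diff, and the key paths and the "jwe:" prefix are hypothetical examples.

// Sketch only, not part of the diff: the call pattern the hunks above adopt.
// The key paths and the "jwe:" prefix are hypothetical examples.
package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/cli"
)

func main() {
	// Values as they would arrive from --decryption-key / --encryption-key flags.
	decryptionKeys := []string{"/tmp/private.pem"}
	encryptionKeys := []string{"jwe:/tmp/public.pem"}

	decConfig, err := cli.DecryptConfig(decryptionKeys)
	if err != nil {
		fmt.Println("unable to obtain decryption config:", err)
		return
	}

	encConfig, encLayers, err := cli.EncryptConfig(encryptionKeys, []int{0})
	if err != nil {
		fmt.Println("unable to obtain encryption config:", err)
		return
	}
	fmt.Println(decConfig != nil, encConfig != nil, encLayers != nil)
}

Both helpers return nil configurations when no keys are supplied (see their bodies later in this diff), so callers can pass the results straight through to the image-copy options.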
go.mod (12 lines changed)
@@ -12,14 +12,14 @@ require (
 	github.com/container-orchestrated-devices/container-device-interface v0.5.4
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.3.0
-	github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07
-	github.com/containers/common v0.53.1-0.20230626115555-370c89881624
+	github.com/containers/buildah v1.30.1-0.20230627110136-33b7088fec7b
+	github.com/containers/common v0.53.1-0.20230627061926-e6f314e59b81
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083
+	github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e
 	github.com/containers/libhvee v0.0.5
 	github.com/containers/ocicrypt v1.1.7
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c
+	github.com/containers/storage v1.47.0
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.4.2
 	github.com/crc-org/vfkit v0.0.5-0.20230602131541-3d57f09010c9
@@ -74,6 +74,7 @@ require (
 )

 require (
+	dario.cat/mergo v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
@@ -119,7 +120,6 @@ require (
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
-	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -151,7 +151,7 @@ require (
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/sigstore/fulcio v1.3.1 // indirect
 	github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
-	github.com/sigstore/sigstore v1.6.5 // indirect
+	github.com/sigstore/sigstore v1.7.1 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/sylabs/sif/v2 v2.11.5 // indirect
go.sum (26 lines changed)
@ -22,6 +22,8 @@ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIA
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
|
||||
@ -237,14 +239,14 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
|
||||
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
|
||||
github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
|
||||
github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
|
||||
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07 h1:Bs2sNFh/fSYr4J6JJLFqzyn3dp6HhlA6ewFwRYUpeIE=
|
||||
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07/go.mod h1:6A/BK0YJLXL8+AqlbceKJrhUT+NtEgsvAc51F7TAllc=
|
||||
github.com/containers/common v0.53.1-0.20230626115555-370c89881624 h1:YBgjfoo0G3tR8vm225ghJnqOZOVv3tH1L1GbyRM9320=
|
||||
github.com/containers/common v0.53.1-0.20230626115555-370c89881624/go.mod h1:qE1MzGl69IoK7ZNCCH51+aLVjyQtnH0LiZe0wG32Jy0=
|
||||
github.com/containers/buildah v1.30.1-0.20230627110136-33b7088fec7b h1:cTb0Sxu/tIQ9uPIchFmkYs+uOtylhyO+0h2+i3XzisQ=
|
||||
github.com/containers/buildah v1.30.1-0.20230627110136-33b7088fec7b/go.mod h1:O2jiDd5+569W8cwqyLnRKiqAHOPTi/Kj+oDlFNsFg24=
|
||||
github.com/containers/common v0.53.1-0.20230627061926-e6f314e59b81 h1:axB9UaqlBcpVX4yA41OfshJd5emqOuQ/GMNxopyAX20=
|
||||
github.com/containers/common v0.53.1-0.20230627061926-e6f314e59b81/go.mod h1:BkgcpfdNC54M3fGDtHUjqt7teGNsuj9yGoWUC+YVhi4=
|
||||
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
|
||||
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083 h1:6Pbnll97ls6G0U3DSxaTqp7Sd8Fykc4gd7BUJm7Bpn8=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083/go.mod h1:yRLIs3vw20kCSt3ZvRyX3cp4EIYjNUW6RX9uq2cZ8J8=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e h1:4W/7KRo29f7zRGRruc3kSf18wNZB/loR1jTygi0TvRM=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e/go.mod h1:3tWjWAL5TC/ZsaaBNkvTxdQqvlNJ463QF51m+oRtZwI=
|
||||
github.com/containers/libhvee v0.0.5 h1:5tUiF2eVe8XbVSPD/Os4dIU1gJWoQgtkQHIjQ5X7wpE=
|
||||
github.com/containers/libhvee v0.0.5/go.mod h1:AYsyMe44w9ylWWEZNW+IOzA7oZ2i/P9TChNljavhYMI=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
@ -257,8 +259,8 @@ github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8Ns
|
||||
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
|
||||
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
|
||||
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
|
||||
github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c h1:hJP+UF9OzDaThxavD5isFbAFxbvb25TdFtjohAhH/dc=
|
||||
github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
|
||||
github.com/containers/storage v1.47.0 h1:Tl/onL8yE/4QABc2kfPDaTSYijk3QrmXGrO21KXkj58=
|
||||
github.com/containers/storage v1.47.0/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
@ -434,7 +436,7 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
|
||||
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-rod/rod v0.113.1 h1:+Qb4K/vkR7BOhW6FhfhtLzUD3l11+0XlF4do+27sOQk=
|
||||
github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
@ -608,8 +610,6 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
||||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@ -883,8 +883,8 @@ github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y
|
||||
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
|
||||
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
|
||||
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
|
||||
github.com/sigstore/sigstore v1.6.5 h1:/liHIo7YPJp6sN31DzBYDOuRPmN1xbzROMBE5DLllYM=
|
||||
github.com/sigstore/sigstore v1.6.5/go.mod h1:h+EoQsf9+6UKgNYxKhBcPgo4PZeEVfzAJxKRRIYhyN4=
|
||||
github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
|
||||
github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
|
||||
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
|
@@ -403,7 +403,6 @@ var _ = Describe("Podman images", func() {
 			Expect(output).To(ContainSubstring("Copying blob "))
 			Expect(output).To(ContainSubstring("Copying config "))
 			Expect(output).To(ContainSubstring("Writing manifest to image destination"))
-			Expect(output).To(ContainSubstring("Storing signatures"))
 		})

 		It("Build no options", func() {
@@ -18,8 +18,6 @@ import (
 	"github.com/containers/common/pkg/config"
 	"github.com/containers/common/pkg/util"
 	"github.com/containers/image/v5/types"
-	encconfig "github.com/containers/ocicrypt/config"
-	enchelpers "github.com/containers/ocicrypt/helpers"
 	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/pkg/errorhandling"
 	"github.com/containers/podman/v4/pkg/namespaces"
@@ -617,40 +615,6 @@ func SizeOfPath(path string) (uint64, error) {
 	return uint64(size), err
 }

-// EncryptConfig translates encryptionKeys into an EncriptionsConfig structure
-func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
-	var encLayers *[]int
-	var encConfig *encconfig.EncryptConfig
-
-	if len(encryptionKeys) > 0 {
-		// encryption
-		encLayers = &encryptLayers
-		ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
-		if err != nil {
-			return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
-		}
-		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
-		encConfig = cc.EncryptConfig
-	}
-	return encConfig, encLayers, nil
-}
-
-// DecryptConfig translates decryptionKeys into a DescriptionConfig structure
-func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
-	var decryptConfig *encconfig.DecryptConfig
-	if len(decryptionKeys) > 0 {
-		// decryption
-		dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
-		if err != nil {
-			return nil, fmt.Errorf("invalid decryption keys: %w", err)
-		}
-		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
-		decryptConfig = cc.DecryptConfig
-	}
-
-	return decryptConfig, nil
-}
-
 // ParseRestartPolicy parses the value given to the --restart flag and returns the policy
 // and restart retries value
 func ParseRestartPolicy(policy string) (string, uint, error) {
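The helpers removed here are not lost: equivalent implementations are added to buildah's pkg/cli/common.go later in this diff, and both versions are thin wrappers around the ocicrypt packages. A sketch of that underlying plumbing, not part of the diff (the key path is a hypothetical example):

// Sketch only, not part of the diff: what the removed DecryptConfig wrapper
// does under the hood, using the ocicrypt packages directly.
package main

import (
	"fmt"

	encconfig "github.com/containers/ocicrypt/config"
	enchelpers "github.com/containers/ocicrypt/helpers"
)

func main() {
	decryptionKeys := []string{"/tmp/private.pem"} // hypothetical key path
	dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
	if err != nil {
		fmt.Println("invalid decryption keys:", err)
		return
	}
	cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
	fmt.Println("have decrypt config:", cc.DecryptConfig != nil)
}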
@@ -263,19 +263,33 @@ skip_if_remote "different error messages between podman & podman-remote" \
###############################################################################
# BEGIN temporary workarounds that must be reevaluated periodically

# 2023-06-27 confirmed this is still broken, main @ 3794d067e
skip_if_remote "FIXME: can this test be made to work under podman-remote?" \
    "bud-with-mount-cache-like-buildkit-verify-default-selinux-option"

# 2023-06-27 confirmed these are still broken, main @ 3794d067e
skip_if_rootless_remote "FIXME: #17788 tar + namespaces over http" \
    "bud-http-context-with-Dockerfile" \
    "bud-http-context-dir-with-Dockerfile" \
    "bud-http-context-with-Containerfile"

# 2023-06-27 confirmed these are still broken, main @ 3794d067e
skip_if_rootless_remote "FIXME: not sure if 17788 or some other bug" \
    "bud-github-context" \
    "bud with Dockerfile from stdin tar" \
    "build-with-network-test"

# 2023-06-27 UPDATE: this seems to be fixed? Maybe we can remove this skip?
# 2023-05-04 fails with:
#    Error: creating build container: initializing source docker://debian:testing-slim: reading manifest testing-slim in quay.io/libpod/debian: manifest unknown
#skip_if_remote "FIXME: 2023-05-04: new code, fails in podman-remote" \
#    "bud-verify-if-we-dont-clean-prexisting-path"

# 2023-06-27 confirmed this is still broken, main @ 3794d067e
# 2023-06-13 buildah 4746 changed exit code & expected error message
skip "FIXME: 2023-06-13 buildah PR 4746 broke this test" \
    "bud with encrypted FROM image"

# END temporary workarounds that must be reevaluated periodically
###############################################################################
@@ -451,7 +451,6 @@ RUN touch /file
 			Expect(output).To(ContainSubstring("Copying blob "))
 			Expect(output).To(ContainSubstring("Copying config "))
 			Expect(output).To(ContainSubstring("Writing manifest to image destination"))
-			Expect(output).To(ContainSubstring("Storing signatures"))

 			push = podmanTest.Podman([]string{"manifest", "push", "--compression-format=gzip", "--compression-level=2", "--tls-verify=false", "--creds=podmantest:wrongpasswd", "foo", "localhost:" + registry.Port + "/credstest"})
 			push.WaitWithDefaultTimeout()
@@ -114,7 +114,6 @@ var _ = Describe("Podman push", func() {
 			Expect(output).To(ContainSubstring("Copying blob "))
 			Expect(output).To(ContainSubstring("Copying config "))
 			Expect(output).To(ContainSubstring("Writing manifest to image destination"))
-			Expect(output).To(ContainSubstring("Storing signatures"))

 			bitSize := 1024
 			keyFileName := filepath.Join(podmanTest.TempDir, "key")
@@ -321,7 +321,7 @@ Deleted: $pauseID"
     # Without -q: verbose output, but only on podman-local, not remote
     run_podman commit my-container --format docker -m comment my-test-image1
     if ! is_remote; then
-        assert "$output" =~ "Getting image.*Writing manif.*Storing signatu" \
+        assert "$output" =~ "Getting image.*Writing manif" \
               "Without -q, verbose output"
     fi
@@ -116,7 +116,7 @@ verify_iid_and_name() {

     # Copy it there.
     run_podman image scp $newname ${notme}@localhost::
-    is "$output" "Copying blob .*Copying config.*Writing manifest.*Storing signatures"
+    is "$output" "Copying blob .*Copying config.*Writing manifest"

     # confirm that image was copied. FIXME: also try $PODMAN image inspect?
     _sudo $PODMAN image exists $newname
@@ -9,4 +9,4 @@ name = "go"
 enabled = true

 [analyzers.meta]
-  import_path = "github.com/imdario/mergo"
+  import_path = "dario.cat/mergo"
@@ -46,13 +46,19 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the

 It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).

-### Important note
+### Important notes
+
+#### 1.0.0
+
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+
+#### 0.3.9

 Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.

 Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.

-If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).

 ### Donations

@@ -110,11 +116,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont

 ## Install

-    go get github.com/imdario/mergo
+    go get dario.cat/mergo

     // use in your .go code
     import (
-        "github.com/imdario/mergo"
+        "dario.cat/mergo"
     )

 ## Usage

@@ -152,7 +158,7 @@ package main

 import (
 	"fmt"
-	"github.com/imdario/mergo"
+	"dario.cat/mergo"
 )

 type Foo struct {
@@ -188,9 +194,9 @@ package main

 import (
 	"fmt"
-	"github.com/imdario/mergo"
-	"reflect"
-	"time"
+	"dario.cat/mergo"
+	"reflect"
+	"time"
 )

 type timeTransformer struct {
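With the move to the vanity URL only the import path changes; the mergo API is unchanged. A short usage sketch under that assumption (not taken from the vendored README):

// Sketch only, not part of the vendored diff: the same Merge call as before,
// reached through the new dario.cat/mergo import path.
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{A: "one", B: 2}
	dst := Foo{} // zero-value fields get filled from src
	if err := mergo.Merge(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {one 2}
}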
@ -8,30 +8,36 @@ A helper to merge structs and maps in Golang. Useful for configuration default v
|
||||
|
||||
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
Status
|
||||
# Status
|
||||
|
||||
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
|
||||
|
||||
Important note
|
||||
# Important notes
|
||||
|
||||
1.0.0
|
||||
|
||||
In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
|
||||
|
||||
0.3.9
|
||||
|
||||
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules.
|
||||
|
||||
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
|
||||
|
||||
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
|
||||
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
|
||||
|
||||
Install
|
||||
# Install
|
||||
|
||||
Do your usual installation procedure:
|
||||
|
||||
go get github.com/imdario/mergo
|
||||
go get dario.cat/mergo
|
||||
|
||||
// use in your .go code
|
||||
import (
|
||||
"github.com/imdario/mergo"
|
||||
)
|
||||
// use in your .go code
|
||||
import (
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
Usage
|
||||
# Usage
|
||||
|
||||
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
@ -59,7 +65,7 @@ Here is a nice example:
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/imdario/mergo"
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
type Foo struct {
|
||||
@ -81,7 +87,7 @@ Here is a nice example:
|
||||
// {two 2}
|
||||
}
|
||||
|
||||
Transformers
|
||||
# Transformers
|
||||
|
||||
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
|
||||
|
||||
@ -89,9 +95,9 @@ Transformers allow to merge specific types differently than in the default behav
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/imdario/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
"dario.cat/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type timeTransformer struct {
|
||||
@ -127,17 +133,16 @@ Transformers allow to merge specific types differently than in the default behav
|
||||
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
|
||||
}
|
||||
|
||||
Contact me
|
||||
# Contact me
|
||||
|
||||
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
|
||||
|
||||
About
|
||||
# About
|
||||
|
||||
Written by Dario Castañé: https://da.rio.hn
|
||||
|
||||
License
|
||||
# License
|
||||
|
||||
BSD 3-Clause license, as Go language.
|
||||
|
||||
*/
|
||||
package mergo
|
vendor/github.com/containers/buildah/.packit.sh (generated, vendored; 28 lines changed)
@ -1,28 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script handles any custom processing of the spec file generated using the `post-upstream-clone`
|
||||
# action and gets used by the fix-spec-file action in .packit.yaml.
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
# Get Version from define/types.go in HEAD
|
||||
VERSION=$(grep ^$'\tVersion' define/types.go | cut -d\" -f2 | sed -e 's/-/~/')
|
||||
|
||||
# Generate source tarball from HEAD
|
||||
git archive --prefix=buildah-$VERSION/ -o buildah-$VERSION.tar.gz HEAD
|
||||
|
||||
# RPM Spec modifications
|
||||
|
||||
# Use the Version from define/types.go in rpm spec
|
||||
sed -i "s/^Version:.*/Version: $VERSION/" buildah.spec
|
||||
|
||||
# Use Packit's supplied variable in the Release field in rpm spec.
|
||||
# buildah.spec is generated using `rpkg spec --outdir ./` as mentioned in the
|
||||
# `post-upstream-clone` action in .packit.yaml.
|
||||
sed -i "s/^Release:.*/Release: $PACKIT_RPMSPEC_RELEASE%{?dist}/" buildah.spec
|
||||
|
||||
# Use above generated tarball as Source in rpm spec
|
||||
sed -i "s/^Source:.*.tar.gz/Source: buildah-$VERSION.tar.gz/" buildah.spec
|
||||
|
||||
# Use the right build dir for autosetup stage in rpm spec
|
||||
sed -i "s/^%setup.*/%autosetup -Sgit -n %{name}-$VERSION/" buildah.spec
|
vendor/github.com/containers/buildah/.packit.yaml (generated, vendored; 30 lines changed)
@ -4,8 +4,11 @@
|
||||
|
||||
# Build targets can be found at:
|
||||
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/packit-builds/
|
||||
# and
|
||||
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
|
||||
|
||||
specfile_path: buildah.spec
|
||||
specfile_path: rpm/buildah.spec
|
||||
upstream_tag_template: v{version}
|
||||
|
||||
jobs:
|
||||
- &copr
|
||||
@ -16,15 +19,28 @@ jobs:
|
||||
enable_net: true
|
||||
srpm_build_deps:
|
||||
- make
|
||||
- rpkg
|
||||
actions:
|
||||
post-upstream-clone:
|
||||
- "rpkg spec --outdir ./"
|
||||
fix-spec-file:
|
||||
- "bash .packit.sh"
|
||||
|
||||
- <<: *copr
|
||||
# Run on commit to main branch
|
||||
trigger: commit
|
||||
branch: main
|
||||
project: podman-next
|
||||
|
||||
- job: propose_downstream
|
||||
trigger: release
|
||||
update_release: false
|
||||
dist_git_branches:
|
||||
- fedora-all
|
||||
actions:
|
||||
pre-sync:
|
||||
- "bash rpm/update-spec-provides.sh"
|
||||
|
||||
- job: koji_build
|
||||
trigger: commit
|
||||
dist_git_branches:
|
||||
- fedora-all
|
||||
|
||||
- job: bodhi_update
|
||||
trigger: commit
|
||||
dist_git_branches:
|
||||
- fedora-branched # rawhide updates are created automatically
|
||||
|
vendor/github.com/containers/buildah/Makefile (generated, vendored; 4 lines changed)
@@ -1,7 +1,7 @@
 export GOPROXY=https://proxy.golang.org

 APPARMORTAG := $(shell hack/apparmor_tag.sh)
-STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
+STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
 SECURITYTAGS ?= seccomp $(APPARMORTAG)
 TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh)
 BUILDTAGS += $(TAGS)
@@ -27,7 +27,7 @@ RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race
 COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
 GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
 SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
-STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
+STATIC_STORAGETAGS = "containers_image_openpgp $(STORAGE_TAGS)"

 # we get GNU make 3.x in MacOS build envs, which wants # to be escaped in
 # strings, while the 4.x we have on Linux doesn't. this is the documented
vendor/github.com/containers/buildah/buildah.spec.rpkg (generated, vendored; 165 lines changed)
@ -1,165 +0,0 @@
|
||||
# For automatic rebuilds in COPR
|
||||
|
||||
# The following tag is to get correct syntax highlighting for this file in vim text editor
|
||||
# vim: syntax=spec
|
||||
|
||||
# Any additinoal comments should go below this line or else syntax highlighting
|
||||
# may not work.
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
|
||||
%global with_debug 1
|
||||
|
||||
# RHEL 8's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
|
||||
# set it separately here and do not depend on RHEL 8's go-srpm-macros package.
|
||||
%if !0%{?fedora} && 0%{?rhel} <= 8
|
||||
%define gobuild(o:) GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
|
||||
%endif
|
||||
|
||||
%if 0%{?with_debug}
|
||||
%global _find_debuginfo_dwz_opts %{nil}
|
||||
%global _dwz_low_mem_die_limit 0
|
||||
%else
|
||||
%global debug_package %{nil}
|
||||
%endif
|
||||
|
||||
%global provider github
|
||||
%global provider_tld com
|
||||
%global project containers
|
||||
%global repo %{name}
|
||||
# https://github.com/containers/%%{name}
|
||||
%global import_path %{provider}.%{provider_tld}/%{project}/%{repo}
|
||||
%global git0 https://%{import_path}
|
||||
|
||||
Name: {{{ git_dir_name }}}
|
||||
Epoch: 101
|
||||
Version: {{{ git_dir_version }}}
|
||||
Release: 1%{?dist}
|
||||
Summary: Manage Pods, Containers and Container Images
|
||||
License: ASL 2.0
|
||||
URL: https://github.com/containers/buildah
|
||||
VCS: {{{ git_dir_vcs }}}
|
||||
Source: {{{ git_dir_pack }}}
|
||||
BuildRequires: device-mapper-devel
|
||||
BuildRequires: git-core
|
||||
BuildRequires: golang
|
||||
BuildRequires: glib2-devel
|
||||
BuildRequires: glibc-static
|
||||
BuildRequires: go-md2man
|
||||
%if 0%{?fedora} || 0%{?rhel} >= 9
|
||||
BuildRequires: go-rpm-macros
|
||||
%endif
|
||||
BuildRequires: gpgme-devel
|
||||
BuildRequires: libassuan-devel
|
||||
BuildRequires: make
|
||||
BuildRequires: ostree-devel
|
||||
BuildRequires: shadow-utils-subid-devel
|
||||
%if 0%{?fedora} && ! 0%{?rhel}
|
||||
BuildRequires: btrfs-progs-devel
|
||||
%endif
|
||||
Requires: containers-common-extra >= 4:1-78
|
||||
%if 0%{?rhel}
|
||||
BuildRequires: libseccomp-devel
|
||||
%else
|
||||
BuildRequires: libseccomp-static
|
||||
%endif
|
||||
Requires: libseccomp
|
||||
Suggests: cpp
|
||||
|
||||
%description
|
||||
The %{name} package provides a command line tool which can be used to
|
||||
* create a working container from scratch
|
||||
or
|
||||
* create a working container from an image as a starting point
|
||||
* mount/umount a working container's root file system for manipulation
|
||||
* save container's root file system layer to create a new image
|
||||
* delete a working container or an image.
|
||||
|
||||
%package tests
|
||||
Summary: Tests for %{name}
|
||||
Requires: %{name} = %{version}-%{release}
|
||||
Requires: bats
|
||||
Requires: bzip2
|
||||
Requires: podman
|
||||
Requires: golang
|
||||
Requires: jq
|
||||
Requires: httpd-tools
|
||||
Requires: openssl
|
||||
Requires: nmap-ncat
|
||||
Requires: git-daemon
|
||||
|
||||
%description tests
|
||||
%{summary}
|
||||
|
||||
This package contains system tests for %{name}
|
||||
|
||||
%prep
|
||||
{{{ git_dir_setup_macro }}}
|
||||
|
||||
%build
|
||||
%set_build_flags
|
||||
export GOPATH=$(pwd)/_build:$(pwd)
|
||||
export CGO_CFLAGS=$CFLAGS
|
||||
# These extra flags present in $CFLAGS have been skipped for now as they break the build
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')
|
||||
|
||||
%ifarch x86_64
|
||||
export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
|
||||
%endif
|
||||
|
||||
mkdir _build
|
||||
pushd _build
|
||||
mkdir -p src/%{provider}.%{provider_tld}/%{project}
|
||||
ln -s $(dirs +1 -l) src/%{import_path}
|
||||
popd
|
||||
|
||||
mv vendor src
|
||||
|
||||
export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'`
|
||||
export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}"
|
||||
|
||||
export BUILDTAGS="$(hack/libsubid_tag.sh) seccomp selinux $(hack/systemd_tag.sh)"
|
||||
%if 0%{?rhel}
|
||||
export BUILDTAGS="$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion"
|
||||
%endif
|
||||
|
||||
%gobuild -o bin/%{name} %{import_path}/cmd/%{name}
|
||||
%gobuild -o bin/imgtype %{import_path}/tests/imgtype
|
||||
%gobuild -o bin/copy %{import_path}/tests/copy
|
||||
GOMD2MAN=go-md2man %{__make} -C docs
|
||||
|
||||
# This will copy the files generated by the `make` command above into
|
||||
# the installable rpm package.
|
||||
%install
|
||||
export GOPATH=$(pwd)/_build:$(pwd):%{gopath}
|
||||
make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
make DESTDIR=%{buildroot} PREFIX=%{_prefix} -C docs install
|
||||
|
||||
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
|
||||
cp -pav tests/. %{buildroot}/%{_datadir}/%{name}/test/system
|
||||
cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype
|
||||
cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy
|
||||
|
||||
rm -f %{buildroot}%{_mandir}/man5/{Containerfile.5*,containerignore.5*}
|
||||
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%doc README.md
|
||||
%{_bindir}/%{name}
|
||||
%{_mandir}/man1/%{name}*
|
||||
%dir %{_datadir}/bash-completion
|
||||
%dir %{_datadir}/bash-completion/completions
|
||||
%{_datadir}/bash-completion/completions/%{name}
|
||||
|
||||
%files tests
|
||||
%license LICENSE
|
||||
%{_bindir}/%{name}-imgtype
|
||||
%{_bindir}/%{name}-copy
|
||||
%{_datadir}/%{name}/test
|
||||
|
||||
%changelog
|
||||
{{{ git_dir_changelog }}}
|
vendor/github.com/containers/buildah/chroot/run_common.go (generated, vendored; 4 lines changed)
@@ -501,6 +501,10 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
 	// Apologize for the namespace configuration that we're about to ignore.
 	logNamespaceDiagnostics(spec)

+	// We need to lock the thread so that PR_SET_PDEATHSIG won't trigger if the current thread exits.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
 	// Start the parent subprocess.
 	cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...)
 	setPdeathsig(cmd.Cmd)
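The new runtime.LockOSThread call matters because PR_SET_PDEATHSIG is tied to the thread that starts the child, not to the whole process, as the added comment notes. A Linux-only sketch of the same pattern, not part of the diff:

// Sketch only, not part of the diff (Linux-only): pinning the goroutine to its
// OS thread so a child configured with Pdeathsig is not killed early just
// because the thread that started it went away.
package main

import (
	"os/exec"
	"runtime"
	"syscall"
)

func runPinned() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	cmd := exec.Command("sleep", "1") // hypothetical child process
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}
	if err := cmd.Start(); err != nil {
		return err
	}
	return cmd.Wait()
}

func main() { _ = runPinned() }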
vendor/github.com/containers/buildah/copier/copier.go (generated, vendored; 2 lines changed)
@@ -1721,7 +1721,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
 		// no type flag for sockets
 	default:
 		return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
-	case tar.TypeReg, tar.TypeRegA:
+	case tar.TypeReg:
 		var written int64
 		written, err = createFile(path, tr)
 		// only check the length if there wasn't an error, which we'll
vendor/github.com/containers/buildah/imagebuildah/build.go (generated, vendored; 5 lines changed)
@@ -17,6 +17,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containers/buildah/define"
 	internalUtil "github.com/containers/buildah/internal/util"
+	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/buildah/util"
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/pkg/config"
@@ -65,7 +66,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 	if options.CommonBuildOpts == nil {
 		options.CommonBuildOpts = &define.CommonBuildOptions{}
 	}
-
+	if err := parse.Volumes(options.CommonBuildOpts.Volumes); err != nil {
+		return "", nil, fmt.Errorf("validating volumes: %w", err)
+	}
 	if len(paths) == 0 {
 		return "", nil, errors.New("building: no dockerfiles specified")
 	}
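BuildDockerfiles now rejects malformed --volume specifications up front instead of failing later in the build. A sketch of that validation in isolation, not part of the diff (the volume string is a hypothetical example):

// Sketch only, not part of the diff: calling the same parse.Volumes check that
// BuildDockerfiles now performs before starting a build.
package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	volumes := []string{"/host/cache:/var/cache:ro"} // hypothetical --volume value
	if err := parse.Volumes(volumes); err != nil {
		fmt.Println("validating volumes:", err)
		return
	}
	fmt.Println("volume specs look valid")
}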
vendor/github.com/containers/buildah/imagebuildah/executor.go (generated, vendored; 50 lines changed)
@ -470,34 +470,34 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
|
||||
output := ""
|
||||
if stageIndex == len(stages)-1 {
|
||||
output = b.output
|
||||
}
|
||||
// Check if any labels were passed in via the API, and add a final line
|
||||
// to the Dockerfile that would provide the same result.
|
||||
// Reason: Docker adds label modification as a last step which can be
|
||||
// processed like regular steps, and if no modification is done to
|
||||
// layers, its easier to re-use cached layers.
|
||||
if len(b.labels) > 0 {
|
||||
var labelLine string
|
||||
labels := append([]string{}, b.labels...)
|
||||
for _, labelSpec := range labels {
|
||||
label := strings.SplitN(labelSpec, "=", 2)
|
||||
key := label[0]
|
||||
value := ""
|
||||
if len(label) > 1 {
|
||||
value = label[1]
|
||||
// Check if any labels were passed in via the API, and add a final line
|
||||
// to the Dockerfile that would provide the same result.
|
||||
// Reason: Docker adds label modification as a last step which can be
|
||||
// processed like regular steps, and if no modification is done to
|
||||
// layers, its easier to re-use cached layers.
|
||||
if len(b.labels) > 0 {
|
||||
var labelLine string
|
||||
labels := append([]string{}, b.labels...)
|
||||
for _, labelSpec := range labels {
|
||||
label := strings.SplitN(labelSpec, "=", 2)
|
||||
key := label[0]
|
||||
value := ""
|
||||
if len(label) > 1 {
|
||||
value = label[1]
|
||||
}
|
||||
// check only for an empty key since docker allows empty values
|
||||
if key != "" {
|
||||
labelLine += fmt.Sprintf(" %q=%q", key, value)
|
||||
}
|
||||
}
|
||||
// check only for an empty key since docker allows empty values
|
||||
if key != "" {
|
||||
labelLine += fmt.Sprintf(" %q=%q", key, value)
|
||||
if len(labelLine) > 0 {
|
||||
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n"))
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("while adding additional LABEL step: %w", err)
|
||||
}
|
||||
stage.Node.Children = append(stage.Node.Children, additionalNode.Children...)
|
||||
}
|
||||
}
|
||||
if len(labelLine) > 0 {
|
||||
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n"))
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("while adding additional LABEL step: %w", err)
|
||||
}
|
||||
stage.Node.Children = append(stage.Node.Children, additionalNode.Children...)
|
||||
}
|
||||
}
|
||||
|
||||
// If this stage is starting out with environment variables that were
|
||||
|
vendor/github.com/containers/buildah/install.md (generated, vendored; 10 lines changed)
@ -161,7 +161,6 @@ Prior to installing Buildah, install the following packages on your Linux distro
|
||||
* bats
|
||||
* btrfs-progs-devel
|
||||
* bzip2
|
||||
* device-mapper-devel
|
||||
* git
|
||||
* go-md2man
|
||||
* gpgme-devel
|
||||
@ -181,7 +180,6 @@ In Fedora, you can use this command:
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
@ -216,7 +214,6 @@ In RHEL and CentOS, run this command to install the build dependencies:
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
@ -242,7 +239,6 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
|
||||
bzip2 \
|
||||
libgpgme-devel \
|
||||
libseccomp-devel \
|
||||
device-mapper-devel \
|
||||
libbtrfs-devel \
|
||||
go-md2man
|
||||
```
|
||||
@ -256,7 +252,7 @@ In Ubuntu jammy you can use these commands:
|
||||
|
||||
```
|
||||
sudo apt-get -y -qq update
|
||||
sudo apt-get -y install bats btrfs-progs git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
|
||||
sudo apt-get -y install bats btrfs-progs git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
|
||||
sudo apt-get -y install golang-1.18
|
||||
```
|
||||
Then to install Buildah on Ubuntu follow the steps in this example:
|
||||
@ -282,7 +278,7 @@ sudo gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyri
|
||||
sudo echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
|
||||
sudo apt update
|
||||
sudo apt -y install -t stretch-backports golang
|
||||
sudo apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
sudo apt -y install bats btrfs-tools git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
```
|
||||
|
||||
The build steps on Debian are otherwise the same as Ubuntu, above.
|
||||
@ -319,7 +315,7 @@ cat /etc/containers/registries.conf
|
||||
# and 'registries.block'.
|
||||
|
||||
[registries.search]
|
||||
registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com', 'registry.centos.org']
|
||||
registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com']
|
||||
|
||||
# If you need to access insecure registries, add the registry's fully-qualified name.
|
||||
# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
|
||||
|
vendor/github.com/containers/buildah/internal/util/util.go (generated, vendored; 56 lines changed)
@@ -8,9 +8,8 @@ import (

 	"github.com/containers/buildah/define"
 	"github.com/containers/common/libimage"
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/image/v5/types"
-	encconfig "github.com/containers/ocicrypt/config"
-	enchelpers "github.com/containers/ocicrypt/helpers"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
@@ -56,6 +55,13 @@ func GetTempDir() string {
 	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
 		return tmpdir
 	}
+	containerConfig, err := config.Default()
+	if err != nil {
+		tmpdir, err := containerConfig.ImageCopyTmpDir()
+		if err != nil {
+			return tmpdir
+		}
+	}
 	return "/var/tmp"
 }
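GetTempDir now consults containers.conf between the TMPDIR override and the /var/tmp fallback. The sketch below shows that intended precedence; it is not part of the diff, and note that it checks err == nil at both steps, whereas the hunk as merged literally tests err != nil, so consult the vendored code for the exact behavior that shipped.

// Sketch only, not part of the diff: the temp-dir lookup order suggested by
// the hunk above ($TMPDIR, then containers.conf image_copy_tmp_dir, then
// /var/tmp), written with err == nil checks.
package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/config"
)

func tempDir() string {
	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
		return tmpdir
	}
	if containerConfig, err := config.Default(); err == nil {
		if tmpdir, err := containerConfig.ImageCopyTmpDir(); err == nil {
			return tmpdir
		}
	}
	return "/var/tmp"
}

func main() { fmt.Println(tempDir()) }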
@ -106,49 +112,3 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecryptConfig translates decryptionKeys into a DescriptionConfig structure
|
||||
func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
|
||||
decryptConfig := &encconfig.DecryptConfig{}
|
||||
if len(decryptionKeys) > 0 {
|
||||
// decryption
|
||||
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid decryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
|
||||
decryptConfig = cc.DecryptConfig
|
||||
}
|
||||
|
||||
return decryptConfig, nil
|
||||
}
|
||||
|
||||
// EncryptConfig translates encryptionKeys into a EncriptionsConfig structure
|
||||
func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
|
||||
var encLayers *[]int
|
||||
var encConfig *encconfig.EncryptConfig
|
||||
|
||||
if len(encryptionKeys) > 0 {
|
||||
// encryption
|
||||
encLayers = &encryptLayers
|
||||
ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
|
||||
encConfig = cc.EncryptConfig
|
||||
}
|
||||
return encConfig, encLayers, nil
|
||||
}
|
||||
|
||||
// GetFormat translates format string into either docker or OCI format constant
|
||||
func GetFormat(format string) (string, error) {
|
||||
switch format {
|
||||
case define.OCI:
|
||||
return define.OCIv1ImageManifest, nil
|
||||
case define.DOCKER:
|
||||
return define.Dockerv2ImageManifest, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unrecognized image type %q", format)
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/containers/buildah/libdm_tag.sh (generated, vendored; 15 lines changed)
@ -1,15 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
tmpdir="$PWD/tmp.$RANDOM"
|
||||
mkdir -p "$tmpdir"
|
||||
trap 'rm -fr "$tmpdir"' EXIT
|
||||
${CC:-cc} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS} -o "$tmpdir"/libdm_tag -x c - -ldevmapper > /dev/null 2> /dev/null << EOF
|
||||
#include <libdevmapper.h>
|
||||
int main() {
|
||||
struct dm_task *task;
|
||||
dm_task_deferred_remove(task);
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
if test $? -ne 0 ; then
|
||||
echo libdm_no_deferred_remove
|
||||
fi
|
vendor/github.com/containers/buildah/pkg/cli/build.go (generated, vendored; 7 lines changed)
@ -14,7 +14,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/buildah/define"
|
||||
iutil "github.com/containers/buildah/internal/util"
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/buildah/pkg/util"
|
||||
"github.com/containers/common/pkg/auth"
|
||||
@ -135,7 +134,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
||||
}
|
||||
|
||||
containerfiles := getContainerfiles(iopts.File)
|
||||
format, err := iutil.GetFormat(iopts.Format)
|
||||
format, err := GetFormat(iopts.Format)
|
||||
if err != nil {
|
||||
return options, nil, nil, err
|
||||
}
|
||||
@ -272,7 +271,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
||||
return options, nil, nil, err
|
||||
}
|
||||
|
||||
decryptConfig, err := iutil.DecryptConfig(iopts.DecryptionKeys)
|
||||
decryptConfig, err := DecryptConfig(iopts.DecryptionKeys)
|
||||
if err != nil {
|
||||
return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
|
||||
}
|
||||
@ -433,7 +432,7 @@ func readBuildArgFile(buildargfile string, args map[string]string) error {
|
||||
return err
|
||||
}
|
||||
for _, arg := range strings.Split(string(argfile), "\n") {
|
||||
if len (arg) == 0 || arg[0] == '#' {
|
||||
if len(arg) == 0 || arg[0] == '#' {
|
||||
continue
|
||||
}
|
||||
readBuildArg(arg, args)
|
||||
|
vendor/github.com/containers/buildah/pkg/cli/common.go (generated, vendored; 48 lines changed)
@ -15,6 +15,8 @@ import (
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
commonComp "github.com/containers/common/pkg/completion"
|
||||
"github.com/containers/common/pkg/config"
|
||||
encconfig "github.com/containers/ocicrypt/config"
|
||||
enchelpers "github.com/containers/ocicrypt/helpers"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/spf13/pflag"
|
||||
@ -523,3 +525,49 @@ func LookupEnvVarReferences(specs, environ []string) []string {
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// DecryptConfig translates decryptionKeys into a DescriptionConfig structure
|
||||
func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
|
||||
var decryptConfig *encconfig.DecryptConfig
|
||||
if len(decryptionKeys) > 0 {
|
||||
// decryption
|
||||
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid decryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
|
||||
decryptConfig = cc.DecryptConfig
|
||||
}
|
||||
|
||||
return decryptConfig, nil
|
||||
}
|
||||
|
||||
// EncryptConfig translates encryptionKeys into a EncriptionsConfig structure
|
||||
func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
|
||||
var encLayers *[]int
|
||||
var encConfig *encconfig.EncryptConfig
|
||||
|
||||
if len(encryptionKeys) > 0 {
|
||||
// encryption
|
||||
encLayers = &encryptLayers
|
||||
ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
|
||||
encConfig = cc.EncryptConfig
|
||||
}
|
||||
return encConfig, encLayers, nil
|
||||
}
|
||||
|
||||
// GetFormat translates format string into either docker or OCI format constant
|
||||
func GetFormat(format string) (string, error) {
|
||||
switch format {
|
||||
case define.OCI:
|
||||
return define.OCIv1ImageManifest, nil
|
||||
case define.DOCKER:
|
||||
return define.Dockerv2ImageManifest, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unrecognized image type %q", format)
|
||||
}
|
||||
}
|
||||
|
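The pkg/cli additions above also include GetFormat, which this diff removes from internal/util so that callers outside buildah can resolve --format values through the public package. A sketch of its use, not part of the diff, assuming "oci" and "docker" are the accepted spellings (matching buildah's define.OCI and define.DOCKER constants):

// Sketch only, not part of the diff: resolving user-supplied --format values
// with the relocated cli.GetFormat helper. The format names are assumptions,
// taken from buildah's define.OCI / define.DOCKER constants.
package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/cli"
)

func main() {
	for _, f := range []string{"oci", "docker", "bogus"} {
		manifestType, err := cli.GetFormat(f)
		if err != nil {
			fmt.Println(err) // "unrecognized image type ..."
			continue
		}
		fmt.Println(f, "->", manifestType)
	}
}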
vendor/github.com/containers/buildah/pkg/parse/parse.go (generated, vendored; 9 lines changed)
@ -17,6 +17,7 @@ import (
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containers/buildah/define"
|
||||
internalParse "github.com/containers/buildah/internal/parse"
|
||||
internalUtil "github.com/containers/buildah/internal/util"
|
||||
"github.com/containers/buildah/pkg/sshagent"
|
||||
"github.com/containers/common/pkg/config"
|
||||
"github.com/containers/common/pkg/parse"
|
||||
@ -154,9 +155,6 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
||||
return nil, fmt.Errorf("invalid --shm-size: %w", err)
|
||||
}
|
||||
volumes, _ := flags.GetStringArray("volume")
|
||||
if err := Volumes(volumes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuPeriod, _ := flags.GetUint64("cpu-period")
|
||||
cpuQuota, _ := flags.GetInt64("cpu-quota")
|
||||
cpuShares, _ := flags.GetUint64("cpu-shares")
|
||||
@ -999,10 +997,7 @@ func isValidDeviceMode(mode string) bool {
|
||||
}
|
||||
|
||||
func GetTempDir() string {
|
||||
if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
|
||||
return tmpdir
|
||||
}
|
||||
return "/var/tmp"
|
||||
return internalUtil.GetTempDir()
|
||||
}
|
||||
|
||||
// Secrets parses the --secret flag
|
||||
|
vendor/github.com/containers/buildah/run_common.go (generated, vendored; 135 lines changed)
@ -35,6 +35,7 @@ import (
|
||||
"github.com/containers/common/libnetwork/network"
|
||||
"github.com/containers/common/libnetwork/resolvconf"
|
||||
netTypes "github.com/containers/common/libnetwork/types"
|
||||
netUtil "github.com/containers/common/libnetwork/util"
|
||||
"github.com/containers/common/pkg/config"
|
||||
"github.com/containers/common/pkg/subscriptions"
|
||||
imageTypes "github.com/containers/image/v5/types"
|
||||
@ -117,7 +118,7 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe
|
||||
}
|
||||
|
||||
// generateHosts creates a containers hosts file
|
||||
func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) {
|
||||
func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string, spec *spec.Spec) (string, error) {
|
||||
conf, err := config.Default()
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -128,12 +129,34 @@ func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoo
|
||||
return "", err
|
||||
}
|
||||
|
||||
var entries etchosts.HostEntries
|
||||
isHost := true
|
||||
if spec.Linux != nil {
|
||||
for _, ns := range spec.Linux.Namespaces {
|
||||
if ns.Type == specs.NetworkNamespace {
|
||||
isHost = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// add host entry for local ip when running in host network
|
||||
if spec.Hostname != "" && isHost {
|
||||
ip := netUtil.GetLocalIP()
|
||||
if ip != "" {
|
||||
entries = append(entries, etchosts.HostEntry{
|
||||
Names: []string{spec.Hostname},
|
||||
IP: ip,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
targetfile := filepath.Join(rdir, "hosts")
|
||||
if err := etchosts.New(&etchosts.Params{
|
||||
BaseFile: path,
|
||||
ExtraHosts: b.CommonBuildOpts.AddHost,
|
||||
HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil),
|
||||
TargetFile: targetfile,
|
||||
ContainerIPs: entries,
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -368,6 +391,9 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp
if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) {
return fmt.Errorf("not allowed to mix host PID namespace with container user namespace")
}
case IsolationChroot:
logrus.Info("network namespace isolation not supported with chroot isolation, forcing host network")
options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
}
return nil
}

@ -1105,8 +1131,12 @@ func runUsingRuntimeMain() {
os.Exit(1)
}

func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks,
func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, networkString string,
moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
defer runtime.UnlockOSThread()

var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,

@ -1207,7 +1237,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
return fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
}

teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, networkString, containerName)
if teardown != nil {
defer teardown()
}

@ -1217,13 +1247,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run

// only add hosts if we manage the hosts file
if hostsFile != "" {
var entries etchosts.HostEntries
if netstatus != nil {
entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
} else {
// we have slirp4netns, default to slirp4netns ip since this is not configurable in buildah
entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
}
entries := etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
// make sure to sync this with (b *Builder) generateHosts()
err = etchosts.Add(hostsFile, entries)
if err != nil {

@ -1328,7 +1352,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
processGID: int(processGID),
}
// Get the list of mounts that are just for this Run() call.
runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
runMounts, mountArtifacts, err := b.runSetupRunMounts(mountPoint, runFileMounts, runMountInfo, idMaps)
if err != nil {
return nil, err
}

@ -1464,10 +1488,28 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
return mountDest
}

func checkIfMountDestinationPreExists(root string, dest string) (bool, error) {
statResults, err := copier.Stat(root, "", copier.StatOptions{}, []string{dest})
if err != nil {
return false, err
}
if len(statResults) > 0 {
// We created exact path for globbing so it will
// return only one result.
if statResults[0].Error != "" && len(statResults[0].Globbed) == 0 {
// Path do not exsits.
return false, nil
}
// Path exists.
return true, nil
}
return false, nil
}

// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
//
// If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??)
func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
// If `type` is not set default to TypeBind
mountType := define.TypeBind
mountTargets := make([]string, 0, 10)

@ -1485,6 +1527,11 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
}
}()
for _, mount := range mounts {
var mountSpec *spec.Mount
var err error
var envFile, image string
var agent *sshagent.AgentServer
var tl *lockfile.LockFile
tokens := strings.Split(mount, ",")
for _, field := range tokens {
if strings.HasPrefix(field, "type=") {

@ -1497,63 +1544,71 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
}
switch mountType {
case "secret":
mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps, sources.WorkDir)
mountSpec, envFile, err = b.getSecretMount(tokens, sources.Secrets, idMaps, sources.WorkDir)
if err != nil {
return nil, nil, err
}
if mount != nil {
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
if mountSpec != nil {
finalMounts = append(finalMounts, *mountSpec)
if envFile != "" {
tmpFiles = append(tmpFiles, envFile)
}
}
case "ssh":
mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
mountSpec, agent, err = b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
if err != nil {
return nil, nil, err
}
if mount != nil {
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
if mountSpec != nil {
finalMounts = append(finalMounts, *mountSpec)
agents = append(agents, agent)
if sshCount == 0 {
defaultSSHSock = mount.Destination
defaultSSHSock = mountSpec.Destination
}
// Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
sshCount++
}
case define.TypeBind:
mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir)
mountSpec, image, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir)
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
finalMounts = append(finalMounts, *mountSpec)
// only perform cleanup if image was mounted ignore everything else
if image != "" {
mountImages = append(mountImages, image)
}
case "tmpfs":
mount, err := b.getTmpfsMount(tokens, idMaps)
mountSpec, err = b.getTmpfsMount(tokens, idMaps)
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
finalMounts = append(finalMounts, *mountSpec)
case "cache":
mount, tl, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
mountSpec, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
finalMounts = append(finalMounts, *mountSpec)
if tl != nil {
targetLocks = append(targetLocks, tl)
}
default:
return nil, nil, fmt.Errorf("invalid mount type %q", mountType)
}

if mountSpec != nil {
pathPreExists, err := checkIfMountDestinationPreExists(mountPoint, mountSpec.Destination)
if err != nil {
return nil, nil, err
}
if !pathPreExists {
// In such case it means that the path did not exists before
// creating any new mounts therefore we must clean the newly
// created directory after this step.
mountTargets = append(mountTargets, mountSpec.Destination)
}
}
}
succeeded = true
artifacts := &runMountArtifacts{
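The cleanup decision above hinges on checkIfMountDestinationPreExists: a RUN mount target is only scheduled for removal when the path was created for this mount. A rough sketch of that check with buildah's copier.Stat (the paths in the demo are placeholders):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/copier"
)

// destinationPreExists reports whether dest already exists under root.
// copier.Stat is called with the exact path, so at most one result comes
// back; an entry with an error and no glob matches means the path is absent.
func destinationPreExists(root, dest string) (bool, error) {
	results, err := copier.Stat(root, "", copier.StatOptions{}, []string{dest})
	if err != nil {
		return false, err
	}
	if len(results) > 0 {
		if results[0].Error != "" && len(results[0].Globbed) == 0 {
			return false, nil // path is absent, so it will be created for this mount
		}
		return true, nil // path was already part of the rootfs
	}
	return false, nil
}

func main() {
	// "/tmp/rootfs" and "/run/secrets" are illustrative values only.
	exists, err := destinationPreExists("/tmp/rootfs", "/run/secrets")
	fmt.Println(exists, err)
}
```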
@ -1622,9 +1677,12 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
target = filepath.Join(workdir, target)
}
case "required":
required, err = strconv.ParseBool(kv[1])
if err != nil {
return nil, "", errInvalidSyntax
required = true
if len(kv) > 1 {
required, err = strconv.ParseBool(kv[1])
if err != nil {
return nil, "", errInvalidSyntax
}
}
case "mode":
mode64, err := strconv.ParseUint(kv[1], 8, 32)

@ -1876,7 +1934,6 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint
return err
}
}

opts := copier.RemoveOptions{
All: true,
}

@ -1900,3 +1957,13 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint
internalParse.UnlockLockArray(artifacts.TargetLocks)
return prevErr
}

// setPdeathsig sets a parent-death signal for the process
// the goroutine that starts the child process should lock itself to
// a native thread using runtime.LockOSThread() until the child exits
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}
39 vendor/github.com/containers/buildah/run_freebsd.go (generated, vendored)

@ -7,10 +7,8 @@ import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"unsafe"

"github.com/containers/buildah/bind"

@ -147,7 +145,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {

setupTerminal(g, options.Terminal, options.TerminalSize)

configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options)
configureNetwork, networkString, err := b.configureNamespaces(g, &options)
if err != nil {
return err
}

@ -198,7 +196,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {

hostFile := ""
if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
hostFile, err = b.generateHosts(path, rootIDPair, mountPoint)
hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec)
if err != nil {
return err
}

@ -282,7 +280,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
} else {
moreCreateArgs = nil
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile)
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile)
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
default:

@ -376,11 +374,16 @@ func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops [
return nil
}

func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, networkString string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
//if isolation == IsolationOCIRootless {
//return setupRootlessNetwork(pid)
//}

var configureNetworks []string
if len(networkString) > 0 {
configureNetworks = strings.Split(networkString, ",")
}

if len(configureNetworks) == 0 {
configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
}

@ -415,7 +418,7 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio
return teardown, nil, nil
}

func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) {
func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
// Set namespace options in the container configuration.
for _, namespaceOption := range namespaceOptions {
switch namespaceOption.Name {

@ -423,7 +426,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
configureNetwork = false
if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
configureNetworks = strings.Split(namespaceOption.Path, ",")
networkString = namespaceOption.Path
namespaceOption.Path = ""
}
configureNetwork = (policy != define.NetworkDisabled)

@ -439,13 +442,13 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
// equivalents for UTS and and network namespaces.
}

return configureNetwork, configureNetworks, configureUTS, nil
return configureNetwork, networkString, configureUTS, nil
}

func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) {
defaultNamespaceOptions, err := DefaultNamespaceOptions()
if err != nil {
return false, nil, err
return false, "", err
}

namespaceOptions := defaultNamespaceOptions

@ -466,9 +469,9 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
}
}

configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
if err != nil {
return false, nil, err
return false, "", err
}

if configureUTS {

@ -495,7 +498,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
}

return configureNetwork, configureNetworks, nil
return configureNetwork, networkString, nil
}

func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {

@ -531,14 +534,6 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string)
return nil
}

// setPdeathsig sets a parent-death signal for the process
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}

// Create pipes to use for relaying stdio.
func runMakeStdioPipe(uid, gid int) ([][]int, error) {
stdioPipe := make([][]int, 3)
236 vendor/github.com/containers/buildah/run_linux.go (generated, vendored)

@ -7,14 +7,13 @@ import (
"context"
"errors"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"

"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot"
"github.com/containers/buildah/copier"

@ -24,8 +23,11 @@ import (
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/pasta"
"github.com/containers/common/libnetwork/resolvconf"
"github.com/containers/common/libnetwork/slirp4netns"
nettypes "github.com/containers/common/libnetwork/types"
netUtil "github.com/containers/common/libnetwork/util"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/common/pkg/chown"
"github.com/containers/common/pkg/config"

@ -202,16 +204,11 @@ func (b *Builder) Run(command []string, options RunOptions) error {

setupTerminal(g, options.Terminal, options.TerminalSize)

configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options)
configureNetwork, networkString, err := b.configureNamespaces(g, &options)
if err != nil {
return err
}

// rootless and networks are not supported
if len(configureNetworks) > 0 && isolation == IsolationOCIRootless {
return errors.New("cannot use networks as rootless")
}

homeDir, err := b.configureUIDGID(g, mountPoint, options)
if err != nil {
return err

@ -264,7 +261,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {

hostFile := ""
if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
hostFile, err = b.generateHosts(path, rootIDPair, mountPoint)
hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec)
if err != nil {
return err
}

@ -366,7 +363,7 @@ rootless=%d
if options.NoPivot {
moreCreateArgs = append(moreCreateArgs, "--no-pivot")
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec,
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)

@ -375,7 +372,7 @@ rootless=%d
if options.NoPivot {
moreCreateArgs = append(moreCreateArgs, "--no-pivot")
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec,
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
default:
err = errors.New("don't know how to run this command")

@ -420,7 +417,7 @@ func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string]
}
}

hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout)
hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) //nolint:staticcheck
if err != nil {
logrus.Warnf("Container: precreate hook: %v", err)
if hookErr != nil && hookErr != err {

@ -475,80 +472,122 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene
return nil
}

func setupRootlessNetwork(pid int) (teardown func(), err error) {
slirp4netns, err := exec.LookPath("slirp4netns")
func setupSlirp4netnsNetwork(netns, cid string, options []string) (func(), map[string]nettypes.StatusBlock, error) {
defConfig, err := config.Default()
if err != nil {
return nil, err
return nil, nil, fmt.Errorf("failed to get container config: %w", err)
}

rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
// we need the TmpDir for the slirp4netns code
if err := os.MkdirAll(defConfig.Engine.TmpDir, 0o751); err != nil {
return nil, nil, fmt.Errorf("failed to create tempdir: %w", err)
}
res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{
Config: defConfig,
ContainerID: cid,
Netns: netns,
ExtraOptions: options,
Pdeathsig: syscall.SIGKILL,
})
if err != nil {
return nil, fmt.Errorf("cannot create slirp4netns sync pipe: %w", err)
return nil, nil, err
}
defer rootlessSlirpSyncR.Close()

// Be sure there are no fds inherited to slirp4netns except the sync pipe
files, err := os.ReadDir("/proc/self/fd")
ip, err := slirp4netns.GetIP(res.Subnet)
if err != nil {
return nil, fmt.Errorf("cannot list open fds: %w", err)
}
for _, f := range files {
fd, err := strconv.Atoi(f.Name())
if err != nil {
return nil, fmt.Errorf("cannot parse fd: %w", err)
}
if fd == int(rootlessSlirpSyncW.Fd()) {
continue
}
unix.CloseOnExec(fd)
return nil, nil, fmt.Errorf("get slirp4netns ip: %w", err)
}

cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0")
setPdeathsig(cmd)
cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}

err = cmd.Start()
rootlessSlirpSyncW.Close()
if err != nil {
return nil, fmt.Errorf("cannot start slirp4netns: %w", err)
}

b := make([]byte, 1)
for {
if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
return nil, fmt.Errorf("setting slirp4netns pipe timeout: %w", err)
}
if _, err := rootlessSlirpSyncR.Read(b); err == nil {
break
} else {
if os.IsTimeout(err) {
// Check if the process is still running.
var status syscall.WaitStatus
_, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
if err != nil {
return nil, fmt.Errorf("failed to read slirp4netns process status: %w", err)
}
if status.Exited() || status.Signaled() {
return nil, errors.New("slirp4netns failed")
}

continue
}
return nil, fmt.Errorf("failed to read from slirp4netns sync pipe: %w", err)
}
}
// create fake status to make sure we get the correct ip in hosts
subnet := nettypes.IPNet{IPNet: net.IPNet{
IP: *ip,
Mask: res.Subnet.Mask,
}}
netStatus := map[string]nettypes.StatusBlock{
slirp4netns.BinaryName: nettypes.StatusBlock{
Interfaces: map[string]nettypes.NetInterface{
"tap0": {
Subnets: []nettypes.NetAddress{{IPNet: subnet}},
},
},
},
}

return func() {
cmd.Process.Kill() // nolint:errcheck
cmd.Wait() // nolint:errcheck
}, nil
syscall.Kill(res.Pid, syscall.SIGKILL) // nolint:errcheck
var status syscall.WaitStatus
syscall.Wait4(res.Pid, &status, 0, nil) // nolint:errcheck
}, netStatus, nil
}

func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
func setupPasta(netns string, options []string) (func(), map[string]nettypes.StatusBlock, error) {
defConfig, err := config.Default()
if err != nil {
return nil, nil, fmt.Errorf("failed to get container config: %w", err)
}

err = pasta.Setup(&pasta.SetupOptions{
Config: defConfig,
Netns: netns,
ExtraOptions: options,
})
if err != nil {
return nil, nil, err
}

var ip string
err = ns.WithNetNSPath(netns, func(_ ns.NetNS) error {
// get the first ip in the netns and use this as our ip for /etc/hosts
ip = netUtil.GetLocalIP()
return nil
})
if err != nil {
return nil, nil, err
}

// create fake status to make sure we get the correct ip in hosts
subnet := nettypes.IPNet{IPNet: net.IPNet{
IP: net.ParseIP(ip),
Mask: net.IPv4Mask(255, 255, 255, 0),
}}
netStatus := map[string]nettypes.StatusBlock{
slirp4netns.BinaryName: nettypes.StatusBlock{
Interfaces: map[string]nettypes.NetInterface{
"tap0": {
Subnets: []nettypes.NetAddress{{IPNet: subnet}},
},
},
},
}

return nil, netStatus, nil
}

func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, network, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
netns := fmt.Sprintf("/proc/%d/ns/net", pid)
var configureNetworks []string

name, networkOpts, hasOpts := strings.Cut(network, ":")
var netOpts []string
if hasOpts {
netOpts = strings.Split(networkOpts, ",")
}
switch {
case name == slirp4netns.BinaryName,
isolation == IsolationOCIRootless && name == "":
return setupSlirp4netnsNetwork(netns, containerName, netOpts)
case name == pasta.BinaryName:
return setupPasta(netns, netOpts)

// Basically default case except we make sure to not split an empty
// name as this would return a slice with one empty string which is
// not a valid network name.
case len(network) > 0:
// old syntax allow comma separated network names
configureNetworks = strings.Split(network, ",")
}

if isolation == IsolationOCIRootless {
teardown, err = setupRootlessNetwork(pid)
return teardown, nil, err
return nil, nil, errors.New("cannot use networks as rootless")
}

if len(configureNetworks) == 0 {
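The --network value now travels as a single string and is only split here: an optional ":" separates the backend name from comma-separated backend options, while a bare comma-separated list keeps the old multi-network syntax. A small, self-contained illustration of that parsing (the option values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// parseNetwork mirrors the name/option split done in runConfigureNetwork:
// "slirp4netns:mtu=65520" -> backend "slirp4netns" with one option, while
// "mynet1,mynet2" falls through to the old comma-separated network list.
func parseNetwork(network string) (backend string, opts []string, networks []string) {
	name, rest, hasOpts := strings.Cut(network, ":")
	if hasOpts {
		opts = strings.Split(rest, ",")
	}
	switch name {
	case "slirp4netns", "pasta":
		return name, opts, nil
	default:
		if network != "" {
			networks = strings.Split(network, ",")
		}
		return "", nil, networks
	}
}

func main() {
	fmt.Println(parseNetwork("slirp4netns:mtu=65520"))
	fmt.Println(parseNetwork("mynet1,mynet2"))
}
```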
@ -560,7 +599,6 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio
// interfaces. Ensure this by opening a handle to the network
// namespace, and using our copy to both configure and
// deconfigure it.
netns := fmt.Sprintf("/proc/%d/ns/net", pid)
netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
if err != nil {
return nil, nil, fmt.Errorf("opening network namespace: %w", err)

@ -615,10 +653,10 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) {
return stdioPipe, nil
}

func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) {
func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
defaultContainerConfig, err := config.Default()
if err != nil {
return false, nil, false, fmt.Errorf("failed to get container config: %w", err)
return false, "", false, fmt.Errorf("failed to get container config: %w", err)
}

addSysctl := func(prefixes []string) error {

@ -644,7 +682,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
case string(specs.IPCNamespace):
if !namespaceOption.Host {
if err := addSysctl([]string{"fs.mqueue"}); err != nil {
return false, nil, false, err
return false, "", false, err
}
}
case string(specs.UserNamespace):

@ -657,7 +695,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
configureNetwork = false
if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
configureNetworks = strings.Split(namespaceOption.Path, ",")
networkString = namespaceOption.Path
namespaceOption.Path = ""
}
configureNetwork = (policy != define.NetworkDisabled)

@ -669,30 +707,30 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
configureUTS = true
}
if err := addSysctl([]string{"kernel.hostname", "kernel.domainame"}); err != nil {
return false, nil, false, err
return false, "", false, err
}
}
}
if namespaceOption.Host {
if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil {
return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err)
return false, "", false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err)
}
} else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil {
if namespaceOption.Path == "" {
return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err)
return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err)
}
return false, nil, false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
return false, "", false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
}
}

// If we've got mappings, we're going to have to create a user namespace.
if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err)
return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err)
}
hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
if err != nil {
return false, nil, false, err
return false, "", false, err
}
for _, m := range idmapOptions.UIDMap {
g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size)

@ -712,23 +750,23 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
if !specifiedNetwork {
if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
configureNetwork = (policy != define.NetworkDisabled)
}
} else {
if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err)
return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err)
}
if !specifiedNetwork {
if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
}
}
if configureNetwork {
if err := addSysctl([]string{"net"}); err != nil {
return false, nil, false, err
return false, "", false, err
}
for name, val := range define.DefaultNetworkSysctl {
// Check that the sysctl we are adding is actually supported

@ -736,7 +774,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
_, err := os.Stat(p)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return false, nil, false, err
return false, "", false, err
}
if err == nil {
g.AddLinuxSysctl(name, val)

@ -745,13 +783,13 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
}
}
return configureNetwork, configureNetworks, configureUTS, nil
return configureNetwork, networkString, configureUTS, nil
}

func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) {
defaultNamespaceOptions, err := DefaultNamespaceOptions()
if err != nil {
return false, nil, err
return false, "", err
}

namespaceOptions := defaultNamespaceOptions

@ -774,9 +812,9 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
if networkPolicy == NetworkDisabled {
namespaceOptions.AddOrReplace(define.NamespaceOptions{{Name: string(specs.NetworkNamespace), Host: false}}...)
}
configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
if err != nil {
return false, nil, err
return false, "", err
}

if configureUTS {

@ -803,7 +841,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
}

return configureNetwork, configureNetworks, nil
return configureNetwork, networkString, nil
}

func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {

@ -1228,11 +1266,3 @@ func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]int
succeeded = true
return &volumes[0], targetLock, nil
}

// setPdeathsig sets a parent-death signal for the process
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}
65 vendor/github.com/containers/common/pkg/secrets/secrets.go (generated, vendored)

@ -79,6 +79,8 @@ type Secret struct {
Metadata map[string]string `json:"metadata,omitempty"`
// CreatedAt is when the secret was created
CreatedAt time.Time `json:"createdAt"`
// UpdatedAt is when the secret was updated
UpdatedAt time.Time `json:"updatedAt"`
// Driver is the driver used to store secret data
Driver string `json:"driver"`
// DriverOptions are extra options used to run this driver

@ -112,6 +114,8 @@ type StoreOptions struct {
Metadata map[string]string
// Labels are labels on the secret
Labels map[string]string
// Replace existing secret
Replace bool
}

// NewManager creates a new secrets manager

@ -140,6 +144,28 @@ func NewManager(rootPath string) (*SecretsManager, error) {
return manager, nil
}

func (s *SecretsManager) newSecret(name string) (*Secret, error) {
secr := new(Secret)
secr.Name = name
secr.CreatedAt = time.Now()
secr.UpdatedAt = secr.CreatedAt

for {
newID := stringid.GenerateNonCryptoID()
// GenerateNonCryptoID() gives 64 characters, so we truncate to correct length
newID = newID[0:secretIDLength]
_, err := s.lookupSecret(newID)
if err != nil {
if errors.Is(err, ErrNoSuchSecret) {
secr.ID = newID
break
}
return nil, err
}
}
return secr, nil
}

// Store takes a name, creates a secret and stores the secret metadata and the secret payload.
// It returns a generated ID that is associated with the secret.
// The max size for secret data is 512kB.

@ -152,7 +178,7 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti
if !(len(data) > 0 && len(data) < maxSecretSize) {
return "", errDataSize
}

var secr *Secret
s.lockfile.Lock()
defer s.lockfile.Unlock()

@ -160,23 +186,22 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti
if err != nil {
return "", err
}

if exist {
return "", fmt.Errorf("%s: %w", name, errSecretNameInUse)
}

secr := new(Secret)
secr.Name = name

for {
newID := stringid.GenerateNonCryptoID()
// GenerateNonCryptoID() gives 64 characters, so we truncate to correct length
newID = newID[0:secretIDLength]
_, err := s.lookupSecret(newID)
if !options.Replace {
return "", fmt.Errorf("%s: %w", name, errSecretNameInUse)
}
secr, err = s.lookupSecret(name)
if err != nil {
return "", err
}
secr.UpdatedAt = time.Now()
} else {
if options.Replace {
return "", fmt.Errorf("%s: %w", name, ErrNoSuchSecret)
}
secr, err = s.newSecret(name)
if err != nil {
if errors.Is(err, ErrNoSuchSecret) {
secr.ID = newID
break
}
return "", err
}
}
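With the new Replace flag the manager updates an existing secret in place (keeping its name, rotating the payload and UpdatedAt) instead of failing with a name-in-use error. A hypothetical caller, assuming a manager rooted at a temporary directory and the file driver with an illustrative "path" option:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/common/pkg/secrets"
)

func main() {
	// "/tmp/secrets-demo" and the driver option below are illustrative values.
	manager, err := secrets.NewManager("/tmp/secrets-demo")
	if err != nil {
		log.Fatal(err)
	}
	opts := secrets.StoreOptions{
		DriverOpts: map[string]string{"path": "/tmp/secrets-demo/filedriver"},
	}
	id, err := manager.Store("db-password", []byte("hunter2"), "file", opts)
	if err != nil {
		log.Fatal(err)
	}
	// Re-store under the same name: Replace keeps the name and swaps the payload.
	opts.Replace = true
	id2, err := manager.Store("db-password", []byte("correct horse"), "file", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id, id2)
}
```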
@ -193,7 +218,6 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti

secr.Driver = driverType
secr.Metadata = options.Metadata
secr.CreatedAt = time.Now()
secr.DriverOptions = options.DriverOpts
secr.Labels = options.Labels

@ -201,6 +225,13 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti
if err != nil {
return "", err
}
if options.Replace {
err = driver.Delete(secr.ID)
if err != nil {
return "", fmt.Errorf("replacing secret %s: %w", name, err)
}
}

err = driver.Store(secr.ID, data)
if err != nil {
return "", fmt.Errorf("creating secret %s: %w", name, err)

8 vendor/github.com/containers/image/v5/copy/single.go (generated, vendored)

@ -256,9 +256,11 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
}
sigs = append(sigs, newSigs...)

c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
}

return manifestBytes, retManifestType, retManifestDigest, nil

2 vendor/github.com/containers/image/v5/openshift/openshift-copies.go (generated, vendored)

@ -17,8 +17,8 @@ import (
"strings"
"time"

"dario.cat/mergo"
"github.com/containers/storage/pkg/homedir"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"

2 vendor/github.com/containers/storage/VERSION (generated, vendored)

@ -1 +1 @@
1.47.0-dev
1.47.0

11 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored)

@ -628,18 +628,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("btrfs driver does not support mount options")
}

if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
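Both the btrfs and vfs drivers now validate mount options with a loop that tolerates any number of "ro" flags and rejects everything else, instead of the earlier length-based switch. The pattern in isolation, as a sketch rather than the drivers' actual method:

```go
package main

import (
	"errors"
	"fmt"
)

// validateMountOpts accepts only the read-only flag; any other option is an
// error, regardless of how many options are passed.
func validateMountOpts(opts []string) error {
	for _, opt := range opts {
		if opt == "ro" {
			// ignore "ro": these drivers cannot enforce it, but it is harmless
			continue
		}
		return errors.New("driver does not support mount options")
	}
	return nil
}

func main() {
	fmt.Println(validateMountOpts([]string{"ro"}))           // <nil>
	fmt.Println(validateMountOpts([]string{"ro", "noexec"})) // error
}
```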
2 vendor/github.com/containers/storage/drivers/driver.go (generated, vendored)

@ -187,6 +187,8 @@ type DriverWithDifferOutput struct {
UncompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
}

// Differ defines the interface for using a custom differ.

3 vendor/github.com/containers/storage/drivers/driver_linux.go (generated, vendored)

@ -60,6 +60,8 @@ const (
FsMagicCephFs = FsMagic(0x00C36400)
// FsMagicCIFS filesystem id for CIFS
FsMagicCIFS = FsMagic(0xFF534D42)
// FsMagicEROFS filesystem id for EROFS
FsMagicEROFS = FsMagic(0xE0F5E1E2)
// FsMagicFHGFS filesystem id for FHGFS
FsMagicFHGFSFs = FsMagic(0x19830326)
// FsMagicIBRIX filesystem id for IBRIX

@ -106,6 +108,7 @@ var (
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
FsMagicEROFS: "erofs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",

1 vendor/github.com/containers/storage/drivers/fsdiff.go (generated, vendored)

@ -55,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare

options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {

3 vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored)

@ -1952,6 +1952,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
return err
}

diffOutput.UncompressedDigest = diffOutput.TOCDigest

return os.Rename(stagingDirectory, diff)
}

11 vendor/github.com/containers/storage/drivers/vfs/driver.go (generated, vendored)

@ -226,15 +226,12 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {

for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("vfs driver does not support mount options")
}
if st, err := os.Stat(dir); err != nil {

25 vendor/github.com/containers/storage/layers.go (generated, vendored)

@ -2392,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.UncompressedDigest = diffOutput.UncompressedDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if err = r.saveFor(layer); err != nil {
return err
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
compressor.Close()
return err
}
compressor.Close()
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return err
}
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return err
}
}
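The tar-split metadata is persisted compressed, using pgzip at its fastest level and with concurrency pinned to a single in-flight block. A standalone sketch of that compress-and-write step (the output path is a placeholder, not the store's real layout):

```go
package main

import (
	"bytes"
	"log"
	"os"

	pgzip "github.com/klauspost/pgzip"
)

// compressTarSplit gzips the raw tar-split JSON before it is written next to
// the layer, mirroring the hunk above.
func compressTarSplit(tarSplit []byte) ([]byte, error) {
	buf := bytes.Buffer{}
	w, err := pgzip.NewWriterLevel(&buf, pgzip.BestSpeed)
	if err != nil {
		w = pgzip.NewWriter(&buf)
	}
	// one block in flight keeps memory use predictable for small payloads
	if err := w.SetConcurrency(1024*1024, 1); err != nil {
		log.Printf("setting compression concurrency to 1: %v; ignoring", err)
	}
	if _, err := w.Write(tarSplit); err != nil {
		w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	data, err := compressTarSplit([]byte(`{"type":"example"}`))
	if err != nil {
		log.Fatal(err)
	}
	// "/tmp/layer.tar-split.gz" is an illustrative path only.
	if err := os.WriteFile("/tmp/layer.tar-split.gz", data, 0o600); err != nil {
		log.Fatal(err)
	}
}
```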
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {

@ -2403,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
if err = r.saveFor(layer); err != nil {
return err
}
return err
}
34 vendor/github.com/containers/storage/pkg/chunked/cache_linux.go (generated, vendored)

@ -516,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {

iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
switch strings.ToLower(field) {
case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime":
count += len(iter.ReadStringAsSlice())
case "xattrs":
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {

@ -548,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {

iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field == "version" {
if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
continue
}
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
switch strings.ToLower(field) {
case "type":
m.Type = getString(iter.ReadStringAsSlice())
case "name":
m.Name = getString(iter.ReadStringAsSlice())
case "linkName":
case "linkname":
m.Linkname = getString(iter.ReadStringAsSlice())
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
case "UID":
case "uid":
m.UID = iter.ReadInt()
case "GID":
case "gid":
m.GID = iter.ReadInt()
case "ModTime":
case "modtime":
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
if err != nil {
return nil, err

@ -592,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return nil, err
}
m.ChangeTime = &time
case "devMajor":
case "devmajor":
m.Devmajor = iter.ReadInt64()
case "devMinor":
case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = getString(iter.ReadStringAsSlice())
case "offset":
m.Offset = iter.ReadInt64()
case "endOffset":
case "endoffset":
m.EndOffset = iter.ReadInt64()
case "chunkSize":
case "chunksize":
m.ChunkSize = iter.ReadInt64()
case "chunkOffset":
case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
case "chunkDigest":
case "chunkdigest":
m.ChunkDigest = getString(iter.ReadStringAsSlice())
case "chunkType":
case "chunktype":
m.ChunkType = getString(iter.ReadStringAsSlice())
case "xattrs":
m.Xattrs = make(map[string]string)

129 vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored)

@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.github.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
return nil, nil, 0, errors.New("blob too small")
}

manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}

var offset, length, lengthUncompressed, manifestType uint64

var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64
tarSplitChecksumAnnotation := ""

if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, 0, err
return nil, nil, 0, err
}

if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil {
return nil, nil, 0, err
}
tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey]
}
} else {
chunk := ImageSourceChunk{

@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
return nil, nil, 0, err
}

offset = binary.LittleEndian.Uint64(footer[0:8])
length = binary.LittleEndian.Uint64(footer[8:16])
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
manifestType = binary.LittleEndian.Uint64(footer[24:32])
if !isZstdChunkedFrameMagic(footer[32:40]) {
return nil, 0, errors.New("invalid magic number")
if !isZstdChunkedFrameMagic(footer[48:56]) {
return nil, nil, 0, errors.New("invalid magic number")
}
}

if manifestType != internal.ManifestTypeCRFS {
return nil, 0, errors.New("invalid manifest type")
return nil, nil, 0, errors.New("invalid manifest type")
}

// set a reasonable limit
if length > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}
if lengthUncompressed > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}

chunk := ImageSourceChunk{

@ -214,47 +224,86 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
Length: length,
}

parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
chunks := []ImageSourceChunk{chunk}

if offsetTarSplit > 0 {
chunkTarSplit := ImageSourceChunk{
Offset: offsetTarSplit,
Length: lengthTarSplit,
}
chunks = append(chunks, chunkTarSplit)
}

parts, errs, err := blobStream.GetBlobAt(chunks)
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}

manifest := make([]byte, length)
if _, err := io.ReadFull(reader, manifest); err != nil {
return nil, 0, err
readBlob := func(len uint64) ([]byte, error) {
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, err
}

blob := make([]byte, len)
if _, err := io.ReadFull(reader, blob); err != nil {
reader.Close()
return nil, err
}
if err := reader.Close(); err != nil {
return nil, err
}
return blob, nil
}

manifestDigester := digest.Canonical.Digester()
manifestChecksum := manifestDigester.Hash()
if _, err := manifestChecksum.Write(manifest); err != nil {
return nil, 0, err
}

d, err := digest.Parse(manifestChecksumAnnotation)
manifest, err := readBlob(length)
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
if manifestDigester.Digest() != d {
return nil, 0, errors.New("invalid manifest checksum")

decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
decodedTarSplit := []byte{}
if offsetTarSplit > 0 {
tarSplit, err := readBlob(lengthTarSplit)
if err != nil {
return nil, nil, 0, err
}

decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
}
return decodedBlob, decodedTarSplit, int64(offset), err
}

func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedUncompressedChecksum)
if err != nil {
return nil, err
}

blobDigester := d.Algorithm().Digester()
blobChecksum := blobDigester.Hash()
if _, err := blobChecksum.Write(blob); err != nil {
return nil, err
}
if blobDigester.Digest() != d {
return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
}

decoder, err := zstd.NewReader(nil) //nolint:contextcheck
if err != nil {
return nil, 0, err
return nil, err
}
defer decoder.Close()

b := make([]byte, 0, lengthUncompressed)
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
return decoded, int64(offset), nil
}

return manifest, int64(offset), nil
return decoder.DecodeAll(blob, b)
}
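The new tar-split lookup relies on the same colon-separated annotation format as the manifest: offset, compressed length and uncompressed length packed into one string and read back with fmt.Sscanf. A tiny sketch of parsing such a value (the annotation content below is made up):

```go
package main

import (
	"fmt"
	"log"
)

// parseTarSplitInfo unpacks an "offset:length:uncompressedLength" annotation,
// the layout read via fmt.Sscanf in the hunk above.
func parseTarSplitInfo(v string) (offset, length, lengthUncompressed uint64, err error) {
	if _, err = fmt.Sscanf(v, "%d:%d:%d", &offset, &length, &lengthUncompressed); err != nil {
		return 0, 0, 0, err
	}
	return offset, length, lengthUncompressed, nil
}

func main() {
	// example value only; real annotations are produced by the compressor
	off, l, lu, err := parseTarSplitInfo("123456:789:4096")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(off, l, lu)
}
```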
72
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
generated
vendored
@ -6,13 +6,17 @@ package compressor

import (
	"bufio"
	"bytes"
	"encoding/base64"
	"io"

	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/ioutils"
	"github.com/klauspost/compress/zstd"
	"github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

const (
@ -198,11 +202,55 @@ type chunk struct {
	ChunkType string
}

type tarSplitData struct {
	compressed          *bytes.Buffer
	digester            digest.Digester
	uncompressedCounter *ioutils.WriteCounter
	zstd                *zstd.Encoder
	packer              storage.Packer
}

func newTarSplitData(level int) (*tarSplitData, error) {
	compressed := bytes.NewBuffer(nil)
	digester := digest.Canonical.Digester()

	zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
	if err != nil {
		return nil, err
	}

	uncompressedCounter := ioutils.NewWriteCounter(zstdWriter)
	metaPacker := storage.NewJSONPacker(uncompressedCounter)

	return &tarSplitData{
		compressed:          compressed,
		digester:            digester,
		uncompressedCounter: uncompressedCounter,
		zstd:                zstdWriter,
		packer:              metaPacker,
	}, nil
}

func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
	// total written so far. Used to retrieve partial offsets in the file
	dest := ioutils.NewWriteCounter(destFile)

	tr := tar.NewReader(reader)
	tarSplitData, err := newTarSplitData(level)
	if err != nil {
		return err
	}
	defer func() {
		if tarSplitData.zstd != nil {
			tarSplitData.zstd.Close()
		}
	}()

	its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
	if err != nil {
		return err
	}

	tr := tar.NewReader(its)
	tr.RawAccounting = true

	buf := make([]byte, 4096)
@ -214,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
	defer func() {
		if zstdWriter != nil {
			zstdWriter.Close()
			zstdWriter.Flush()
		}
	}()

@ -224,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
		if err := zstdWriter.Close(); err != nil {
			return 0, err
		}
		if err := zstdWriter.Flush(); err != nil {
			return 0, err
		}
		offset = dest.Count
		zstdWriter.Reset(dest)
	}
@ -373,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r

	rawBytes := tr.RawBytes()
	if _, err := zstdWriter.Write(rawBytes); err != nil {
		zstdWriter.Close()
		return err
	}
	if err := zstdWriter.Flush(); err != nil {
		zstdWriter.Close()
		return err
	}
	if err := zstdWriter.Close(); err != nil {
@ -383,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
	}
	zstdWriter = nil

	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
	if err := tarSplitData.zstd.Flush(); err != nil {
		return err
	}
	if err := tarSplitData.zstd.Close(); err != nil {
		return err
	}
	tarSplitData.zstd = nil

	ts := internal.TarSplitData{
		Data:             tarSplitData.compressed.Bytes(),
		Digest:           tarSplitData.digester.Digest(),
		UncompressedSize: tarSplitData.uncompressedCounter.Count,
	}

	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
}

type zstdChunkedWriter struct {
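The compressor now threads the layer tar stream through tar-split so the original tarball can later be reproduced byte-for-byte. Below is a hedged sketch of that recording pattern, assuming the vbatts/tar-split API used in the vendored imports; recordTarSplit and the file name layer.tar are illustrative only, and the real compressor additionally drives the zstd chunking shown above.

package main

import (
	"io"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// recordTarSplit drains a tar stream while a JSON packer records the metadata
// needed to reassemble the exact original bytes later.
func recordTarSplit(tarStream io.Reader, metaOut io.Writer) error {
	packer := storage.NewJSONPacker(metaOut)
	its, err := asm.NewInputTarStream(tarStream, packer, nil)
	if err != nil {
		return err
	}
	tr := tar.NewReader(its)
	for {
		_, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// Reading the entry contents is what drives the packer forward.
		if _, err := io.Copy(io.Discard, tr); err != nil {
			return err
		}
	}
	// Consume any trailing padding so the packer sees the full stream.
	_, err = io.Copy(io.Discard, its)
	return err
}

func main() {
	f, err := os.Open("layer.tar") // hypothetical input layer
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := recordTarSplit(f, os.Stdout); err != nil {
		panic(err)
	}
}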
30
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
generated
vendored
@ -90,6 +90,8 @@ func GetType(t byte) (string, error) {
const (
	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
	TarSplitInfoKey     = "io.github.containers.zstd-chunked.tarsplit-position"

	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
	ManifestTypeCRFS = 1
@ -97,7 +99,7 @@ const (
	// FooterSizeSupported is the footer size supported by this implementation.
	// Newer versions of the image format might increase this value, so reject
	// any version that is not supported.
	FooterSizeSupported = 40
	FooterSizeSupported = 56
)

var (
@ -125,9 +127,16 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
	return nil
}

func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error {
type TarSplitData struct {
	Data             []byte
	Digest           digest.Digest
	UncompressedSize int64
}

func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
	// 8 is the size of the zstd skippable frame header + the frame size
	manifestOffset := offset + 8
	const zstdSkippableFrameHeader = 8
	manifestOffset := offset + zstdSkippableFrameHeader

	toc := TOC{
		Version: 1,
@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
		return err
	}

	outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
	tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
	outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
	if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
		return err
	}

	// Store the offset to the manifest and its size in LE order
	manifestDataLE := make([]byte, FooterSizeSupported)
	binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
	binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
	copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
	binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
	copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)

	return appendZstdSkippableFrame(dest, manifestDataLE)
}
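The new tarsplit-position annotation written above packs three numbers into one string: the offset of the tar-split skippable frame, its compressed length, and its uncompressed length. A hedged sketch of the matching parse is shown below; parseTarSplitInfo is an illustrative name, not part of the vendored code.

package example

import "fmt"

// parseTarSplitInfo decodes the "<offset>:<compressedLength>:<uncompressedLength>"
// value stored under io.github.containers.zstd-chunked.tarsplit-position.
func parseTarSplitInfo(v string) (uint64, uint64, uint64, error) {
	var offset, compressedLen, uncompressedLen uint64
	if _, err := fmt.Sscanf(v, "%d:%d:%d", &offset, &compressedLen, &uncompressedLen); err != nil {
		return 0, 0, 0, fmt.Errorf("invalid tarsplit-position %q: %w", v, err)
	}
	return offset, compressedLen, uncompressedLen, nil
}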
78
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
@ -55,6 +55,7 @@ type compressedFileType int
type chunkedDiffer struct {
	stream      ImageSourceSeekable
	manifest    []byte
	tarSplit    []byte
	layersCache *layersCache
	tocOffset   int64
	fileType    compressedFileType
@ -64,6 +65,8 @@ type chunkedDiffer struct {
	gzipReader *pgzip.Reader
	zstdReader *zstd.Decoder
	rawReader  io.Reader

	tocDigest digest.Digest
}

var xattrsToIgnore = map[string]interface{}{
@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
	return dstFile, st.Size(), nil
}

// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
		d, err := digest.Parse(tocDigest)
		if err != nil {
			return nil, err
		}
		return &d, nil
	}
	if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
		d, err := digest.Parse(tocDigest)
		if err != nil {
			return nil, err
		}
		return &d, nil
	}
	return nil, nil
}

// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@ -147,7 +170,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
}

func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
	manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
	manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
	if err != nil {
		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
	}
@ -156,13 +179,20 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
		return nil, err
	}

	tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
	if err != nil {
		return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
	}

	return &chunkedDiffer{
		copyBuffer:  makeCopyBuffer(),
		stream:      iss,
		manifest:    manifest,
		layersCache: layersCache,
		tocOffset:   tocOffset,
		fileType:    fileTypeZstdChunked,
		layersCache: layersCache,
		manifest:    manifest,
		stream:      iss,
		tarSplit:    tarSplit,
		tocOffset:   tocOffset,
		tocDigest:   tocDigest,
	}, nil
}

@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
		return nil, err
	}

	tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
	if err != nil {
		return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
	}

	return &chunkedDiffer{
		copyBuffer: makeCopyBuffer(),
		stream:     iss,
@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
		layersCache: layersCache,
		tocOffset:   tocOffset,
		fileType:    fileTypeEstargz,
		tocDigest:   tocDigest,
	}, nil
}

@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
	return nil
}

func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
	var out []uint32
	for value := range inputMap {
		out = append(out, value)
	}
	return out
}

func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) {
	uids := make(map[uint32]struct{})
	gids := make(map[uint32]struct{})
	for _, entry := range entries {
		uids[uint32(entry.UID)] = struct{}{}
		gids[uint32(entry.GID)] = struct{}{}
	}
	return mapToSlice(uids), mapToSlice(gids)
}

type originFile struct {
	Root string
	Path string
@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
		}
	}()

	bigData := map[string][]byte{
		bigDataKey: c.manifest,
	}
	output := graphdriver.DriverWithDifferOutput{
		Differ:  c,
		BigData: bigData,
		Differ:   c,
		TarSplit: c.tarSplit,
		BigData: map[string][]byte{
			bigDataKey: c.manifest,
		},
		TOCDigest: c.tocDigest,
	}

	storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra

	var missingParts []missingPart

	output.UIDs, output.GIDs = collectIDs(toc.Entries)

	mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
	if err != nil {
		return output, err
@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
	if totalChunksSize > 0 {
		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
	}

	return output, nil
}
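The new GetTOCDigest above gives callers one entry point that covers both the estargz and the zstd:chunked annotation keys. A small illustrative caller follows; it is not part of the vendored code, and it assumes the exported github.com/containers/storage/pkg/chunked package with the signature shown in the diff.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked"
)

// printTOCDigest reports whether a layer's annotations carry a TOC digest.
func printTOCDigest(annotations map[string]string) {
	d, err := chunked.GetTOCDigest(annotations)
	if err != nil {
		fmt.Println("invalid TOC digest annotation:", err)
		return
	}
	if d == nil {
		fmt.Println("layer is not estargz or zstd:chunked")
		return
	}
	fmt.Println("TOC digest:", d.String())
}

func main() {
	// Hypothetical annotation value, for illustration only.
	printTOCDigest(map[string]string{
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
	})
}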
9
vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
generated
vendored
@ -9,9 +9,16 @@ import (

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	digest "github.com/opencontainers/go-digest"
)

// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	return nil, errors.New("format not supported on this architecture")
	return nil, errors.New("format not supported on this system")
}

// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	return nil, errors.New("format not supported on this system")
}
23
vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
generated
vendored
@ -52,16 +52,32 @@ type Image struct {

// Cosign describes a container image signed using Cosign
type Cosign struct {
	Image       name.Digest
	Annotations map[string]interface{}
	Image name.Digest
	// ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead.
	// ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how user refers to the image;
	// e.g. if the user asks to access a signed image example.com/repo/mysql:3.14,
	// it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14
	//
	// Considerations:
	// - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match
	// - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index also on the per-arch images
	//   (possibly even if ClaimedIdentity contains a digest!)
	// - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users
	//   to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them)
	ClaimedIdentity string
	Annotations     map[string]interface{}
}

// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format
func (p Cosign) SimpleContainerImage() SimpleContainerImage {
	dockerReference := p.Image.Repository.Name()
	if p.ClaimedIdentity != "" {
		dockerReference = p.ClaimedIdentity
	}
	return SimpleContainerImage{
		Critical: Critical{
			Identity: Identity{
				DockerReference: p.Image.Repository.Name(),
				DockerReference: dockerReference,
			},
			Image: Image{
				DockerManifestDigest: p.Image.DigestStr(),
@ -98,6 +114,7 @@ func (p *Cosign) UnmarshalJSON(data []byte) error {
		return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err)
	}
	p.Image = digest
	p.ClaimedIdentity = simple.Critical.Identity.DockerReference
	p.Annotations = simple.Optional
	return nil
}
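The ClaimedIdentity comment above places the burden on signature consumers to compare the claimed identity with the image reference the user actually requested. Below is a hedged sketch of that comparison, assuming go-containerregistry's name package; checkClaimedIdentity is an illustrative helper, and real policy engines (for example the containers/image signature policies) are considerably more nuanced, especially around digest references and legacy tag-less identities.

package example

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature/payload"
)

// checkClaimedIdentity rejects a signature whose claimed identity does not
// match the image reference the user asked for.
func checkClaimedIdentity(p payload.Cosign, userRef string) error {
	if p.ClaimedIdentity == "" {
		// Older cosign signatures carry only registry/…/repo; policy must
		// decide whether such signatures are acceptable.
		return fmt.Errorf("signature carries no claimed identity")
	}
	want, err := name.ParseReference(userRef)
	if err != nil {
		return err
	}
	claimed, err := name.ParseReference(p.ClaimedIdentity)
	if err != nil {
		return err
	}
	if claimed.String() != want.String() {
		return fmt.Errorf("claimed identity %q does not match requested image %q", claimed.String(), want.String())
	}
	return nil
}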
18
vendor/modules.txt
vendored
@ -1,3 +1,6 @@
# dario.cat/mergo v1.0.0
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
github.com/Azure/go-ansiterm
@ -102,7 +105,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.3.0
## explicit; go 1.20
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07
# github.com/containers/buildah v1.30.1-0.20230627110136-33b7088fec7b
## explicit; go 1.18
github.com/containers/buildah
github.com/containers/buildah/bind
@ -125,7 +128,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.53.1-0.20230626115555-370c89881624
# github.com/containers/common v0.53.1-0.20230627061926-e6f314e59b81
## explicit; go 1.18
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@ -183,7 +186,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083
# github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e
## explicit; go 1.18
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -290,7 +293,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c
# github.com/containers/storage v1.47.0
## explicit; go 1.19
github.com/containers/storage
github.com/containers/storage/drivers
@ -580,9 +583,6 @@ github.com/hashicorp/go-multierror
# github.com/hashicorp/go-retryablehttp v0.7.4
## explicit; go 1.13
github.com/hashicorp/go-retryablehttp
# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@ -816,8 +816,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
# github.com/sigstore/sigstore v1.6.5
## explicit; go 1.18
# github.com/sigstore/sigstore v1.7.1
## explicit; go 1.19
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
github.com/sigstore/sigstore/pkg/oauthflow