From 174631f7261efc0003251723acd9dc354b4f9312 Mon Sep 17 00:00:00 2001 From: Matt Heon Date: Tue, 23 Jan 2024 15:13:45 -0500 Subject: [PATCH 1/2] Convert SpecGen values to be nullable where possible SpecGen is our primary container creation abstraction, and is used to connect our CLI to the Libpod container creation backend. Because container creation has a million options (I exaggerate only slightly), the struct is composed of several other structs, many of which are quite large. The core problem is that SpecGen is also an API type - it's used in remote Podman. There, we have a client and a server, and we want to respect the server's containers.conf. But how do we tell what parts of SpecGen were set by the client explicitly, and what parts were not? If we're not using nullable values, an explicit empty string and a value never being set are identical - and we can't tell if it's safe to grab a default from the server's containers.conf. Fortunately, we only really need to do this for booleans. An empty string is sufficient to tell us that a string was unset (even if the user explicitly gave us an empty string for an option, filling in a default from the config file is acceptable). This makes things a lot simpler. My initial attempt at this changed everything, including strings, and it was far larger and more painful. Also, begin the first steps of removing all uses of containers.conf defaults from client-side. Two are gone entirely, the rest are marked as remove-when-possible. [NO NEW TESTS NEEDED] This is just a refactor. Signed-off-by: Matt Heon --- pkg/api/handlers/compat/containers_create.go | 5 +- pkg/api/handlers/libpod/containers_create.go | 4 +- pkg/bindings/test/attach_test.go | 3 +- pkg/bindings/test/common_test.go | 3 +- pkg/bindings/test/containers_test.go | 6 +- pkg/bindings/test/create_test.go | 3 +- pkg/domain/entities/pods.go | 3 +- pkg/domain/infra/abi/containers.go | 3 +- pkg/domain/infra/abi/play.go | 1 - pkg/specgen/container_validate.go | 6 +- pkg/specgen/generate/container.go | 24 +++- pkg/specgen/generate/container_create.go | 30 ++-- pkg/specgen/generate/kube/kube.go | 25 ++-- pkg/specgen/generate/namespaces.go | 4 +- pkg/specgen/generate/namespaces_linux.go | 2 +- pkg/specgen/generate/oci.go | 2 +- pkg/specgen/generate/oci_freebsd.go | 12 +- pkg/specgen/generate/oci_linux.go | 34 +++-- pkg/specgen/generate/pod_create.go | 5 +- pkg/specgen/generate/pod_create_test.go | 7 +- pkg/specgen/generate/ports.go | 2 +- pkg/specgen/generate/security_freebsd.go | 6 +- pkg/specgen/generate/security_linux.go | 20 +-- pkg/specgen/generate/storage.go | 4 +- pkg/specgen/specgen.go | 142 +++++++++++-------- pkg/specgen/specgen_test.go | 15 +- pkg/specgenutil/specgen.go | 67 +++++---- pkg/specgenutil/volumes.go | 1 + test/system/160-volumes.bats | 2 +- 29 files changed, 263 insertions(+), 178 deletions(-) diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go index 7e821ad40f..8565ed8f25 100644 --- a/pkg/api/handlers/compat/containers_create.go +++ b/pkg/api/handlers/compat/containers_create.go @@ -115,7 +115,8 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) { return } // moby always create the working directory - sg.CreateWorkingDir = true + localTrue := true + sg.CreateWorkingDir = &localTrue // moby doesn't inherit /etc/hosts from host sg.BaseHostsFile = "none" @@ -420,7 +421,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C Expose: expose, GroupAdd: cc.HostConfig.GroupAdd, 
Hostname: cc.Config.Hostname, - ImageVolume: define.TypeBind, + ImageVolume: "anonymous", Init: init, Interactive: cc.Config.OpenStdin, IPC: string(cc.HostConfig.IpcMode), diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go index 28f2ff5dbe..645a7ea220 100644 --- a/pkg/api/handlers/libpod/containers_create.go +++ b/pkg/api/handlers/libpod/containers_create.go @@ -30,11 +30,11 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) { // we have to set the default before we decode to make sure the correct default is set when the field is unset sg := specgen.SpecGenerator{ ContainerNetworkConfig: specgen.ContainerNetworkConfig{ - UseImageHosts: conf.Containers.NoHosts, + UseImageHosts: &conf.Containers.NoHosts, }, ContainerSecurityConfig: specgen.ContainerSecurityConfig{ Umask: conf.Containers.Umask, - Privileged: conf.Containers.Privileged, + Privileged: &conf.Containers.Privileged, }, } diff --git a/pkg/bindings/test/attach_test.go b/pkg/bindings/test/attach_test.go index 5849514061..b1789f6add 100644 --- a/pkg/bindings/test/attach_test.go +++ b/pkg/bindings/test/attach_test.go @@ -67,7 +67,8 @@ var _ = Describe("Podman containers attach", func() { It("can echo data via cat in container", func() { s := specgen.NewSpecGenerator(alpine.name, false) s.Name = "CatAttachTest" - s.Terminal = true + localTrue := true + s.Terminal = &localTrue s.Command = []string{"/bin/cat"} ctnr, err := containers.CreateWithSpec(bt.conn, s, nil) Expect(err).ShouldNot(HaveOccurred()) diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go index acc9a62cf7..a23b3c6401 100644 --- a/pkg/bindings/test/common_test.go +++ b/pkg/bindings/test/common_test.go @@ -203,7 +203,8 @@ func (b *bindingTest) restoreImageFromCache(i testImage) { // and add or append the alpine image to it func (b *bindingTest) RunTopContainer(containerName *string, podName *string) (string, error) { s := specgen.NewSpecGenerator(alpine.name, false) - s.Terminal = false + terminal := false + s.Terminal = &terminal s.Command = []string{"/usr/bin/top"} if containerName != nil { s.Name = *containerName diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index 18c753b90e..12ff16dbd2 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -360,7 +360,8 @@ var _ = Describe("Podman containers ", func() { It("logging", func() { stdoutChan := make(chan string, 10) s := specgen.NewSpecGenerator(alpine.name, false) - s.Terminal = true + terminal := true + s.Terminal = &terminal s.Command = []string{"date", "-R"} r, err := containers.CreateWithSpec(bt.conn, s, nil) Expect(err).ToNot(HaveOccurred()) @@ -774,7 +775,8 @@ var _ = Describe("Podman containers ", func() { _, err = bt.RunTopContainer(&name2, nil) Expect(err).ToNot(HaveOccurred()) s := specgen.NewSpecGenerator(alpine.name, false) - s.Terminal = true + terminal := true + s.Terminal = &terminal s.Command = []string{"date", "-R"} _, err = containers.CreateWithSpec(bt.conn, s, nil) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/bindings/test/create_test.go b/pkg/bindings/test/create_test.go index ee3f43750d..abb0df95e1 100644 --- a/pkg/bindings/test/create_test.go +++ b/pkg/bindings/test/create_test.go @@ -33,7 +33,8 @@ var _ = Describe("Create containers ", func() { It("create a container running top", func() { s := specgen.NewSpecGenerator(alpine.name, false) s.Command = []string{"top"} - s.Terminal = true + terminal := true + s.Terminal = 
&terminal s.Name = "top" ctr, err := containers.CreateWithSpec(bt.conn, s, nil) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go index 32b6b2f2ee..879d78b253 100644 --- a/pkg/domain/entities/pods.go +++ b/pkg/domain/entities/pods.go @@ -5,7 +5,6 @@ import ( "strings" commonFlag "github.com/containers/common/pkg/flag" - "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/domain/entities/types" "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/podman/v4/pkg/util" @@ -271,7 +270,7 @@ type ContainerCreateOptions struct { func NewInfraContainerCreateOptions() ContainerCreateOptions { options := ContainerCreateOptions{ IsInfra: true, - ImageVolume: define.TypeBind, + ImageVolume: "anonymous", MemorySwappiness: -1, } return options diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 13aa1e193d..4ab040a52b 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -1701,7 +1701,8 @@ func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts enti } // if we do not pass term, running ctrs exit - spec.Terminal = c.Terminal() + localTerm := c.Terminal() + spec.Terminal = &localTerm // Print warnings if len(out) > 0 { diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go index 3ca8f18a5d..c977704554 100644 --- a/pkg/domain/infra/abi/play.go +++ b/pkg/domain/infra/abi/play.go @@ -75,7 +75,6 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri } ctrOpts := entities.ContainerCreateOptions{ // Inherited from infra containers - ImageVolume: define.TypeBind, IsInfra: false, MemorySwappiness: -1, ReadOnly: true, diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go index 2371434656..223a3d8fc8 100644 --- a/pkg/specgen/container_validate.go +++ b/pkg/specgen/container_validate.go @@ -31,7 +31,7 @@ func (s *SpecGenerator) Validate() error { if len(s.Networks) > 0 { return fmt.Errorf("networks must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } - if len(s.PortMappings) > 0 || s.PublishExposedPorts { + if len(s.PortMappings) > 0 || (s.PublishExposedPorts != nil && *s.PublishExposedPorts) { return fmt.Errorf("published or exposed ports must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } if len(s.HostAdd) > 0 { @@ -102,7 +102,7 @@ func (s *SpecGenerator) Validate() error { // ContainerNetworkConfig // // useimageresolveconf conflicts with dnsserver, dnssearch, dnsoption - if s.UseImageResolvConf { + if s.UseImageResolvConf != nil && *s.UseImageResolvConf { if len(s.DNSServers) > 0 { return exclusiveOptions("UseImageResolvConf", "DNSServer") } @@ -114,7 +114,7 @@ func (s *SpecGenerator) Validate() error { } } // UseImageHosts and HostAdd are exclusive - if s.UseImageHosts && len(s.HostAdd) > 0 { + if (s.UseImageHosts != nil && *s.UseImageHosts) && len(s.HostAdd) > 0 { return exclusiveOptions("UseImageHosts", "HostAdd") } diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go index 11541ab809..1f5cb8945f 100644 --- a/pkg/specgen/generate/container.go +++ b/pkg/specgen/generate/container.go @@ -110,7 +110,15 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat } // Get Default Environment from containers.conf - defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(s.EnvHost, s.HTTPProxy)) + envHost := false + if s.EnvHost != nil 
{ + envHost = *s.EnvHost + } + httpProxy := false + if s.HTTPProxy != nil { + httpProxy = *s.HTTPProxy + } + defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(envHost, httpProxy)) if err != nil { return nil, fmt.Errorf("parsing fields in containers.conf: %w", err) } @@ -129,7 +137,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat // add default terminal to env if tty flag is set _, ok := defaultEnvs["TERM"] - if s.Terminal && !ok { + if (s.Terminal != nil && *s.Terminal) && !ok { defaultEnvs["TERM"] = "xterm" } @@ -154,7 +162,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat delete(defaultEnvs, e) } - if s.UnsetEnvAll { + if s.UnsetEnvAll != nil && *s.UnsetEnvAll { defaultEnvs = make(map[string]string) } // First transform the os env into a map. We need it for the labels later in @@ -162,9 +170,9 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat osEnv := envLib.Map(os.Environ()) // Caller Specified defaults - if s.EnvHost { + if envHost { defaultEnvs = envLib.Join(defaultEnvs, osEnv) - } else if s.HTTPProxy { + } else if httpProxy { for _, envSpec := range config.ProxyEnv { if v, ok := osEnv[envSpec]; ok { defaultEnvs[envSpec] = v @@ -303,6 +311,10 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat warnings = append(warnings, "Port mappings have been discarded as one of the Host, Container, Pod, and None network modes are in use") } + if len(s.ImageVolumeMode) == 0 { + s.ImageVolumeMode = rtc.Engine.ImageVolumeMode + } + return warnings, nil } @@ -497,7 +509,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID // mapSecurityConfig takes a libpod.ContainerSecurityConfig and converts it to a specgen.ContinerSecurityConfig func mapSecurityConfig(c *libpod.ContainerConfig, s *specgen.SpecGenerator) { - s.Privileged = c.Privileged + s.Privileged = &c.Privileged s.SelinuxOpts = append(s.SelinuxOpts, c.LabelOpts...) 
s.User = c.User s.Groups = c.Groups diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go index 6c71263248..927d64d446 100644 --- a/pkg/specgen/generate/container_create.go +++ b/pkg/specgen/generate/container_create.go @@ -152,7 +152,11 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener } if s.Rootfs != "" { - options = append(options, libpod.WithRootFS(s.Rootfs, s.RootfsOverlay, s.RootfsMapping)) + rootfsOverlay := false + if s.RootfsOverlay != nil { + rootfsOverlay = *s.RootfsOverlay + } + options = append(options, libpod.WithRootFS(s.Rootfs, rootfsOverlay, s.RootfsMapping)) } newImage, resolvedImageName, imageData, err := getImageFromSpec(ctx, rt, s) @@ -358,7 +362,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithPreserveFD(s.PreserveFD)) } - if s.Stdin { + if s.Stdin != nil && *s.Stdin { options = append(options, libpod.WithStdin()) } @@ -368,7 +372,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l if s.Umask != "" { options = append(options, libpod.WithUmask(s.Umask)) } - if s.Volatile { + if s.Volatile != nil && *s.Volatile { options = append(options, libpod.WithVolatile()) } if s.PasswdEntry != "" { @@ -381,7 +385,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithBaseHostsFile(s.BaseHostsFile)) } - if s.Privileged { + if s.IsPrivileged() { options = append(options, libpod.WithMountAllDevices()) } @@ -520,7 +524,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l if s.WorkDir == "" { s.WorkDir = "/" } - if s.CreateWorkingDir { + if s.CreateWorkingDir != nil && *s.CreateWorkingDir { options = append(options, libpod.WithCreateWorkingDir()) } if s.StopSignal != nil { @@ -547,8 +551,8 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithLogDriver(s.LogConfiguration.Driver)) } } - if s.ContainerSecurityConfig.LabelNested { - options = append(options, libpod.WithLabelNested(s.ContainerSecurityConfig.LabelNested)) + if s.LabelNested != nil { + options = append(options, libpod.WithLabelNested(*s.LabelNested)) } // Security options if len(s.SelinuxOpts) > 0 { @@ -567,8 +571,10 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithSecLabels(selinuxOpts)) } } - options = append(options, libpod.WithPrivileged(s.Privileged)) - options = append(options, libpod.WithReadWriteTmpfs(s.ReadWriteTmpfs)) + options = append(options, libpod.WithPrivileged(s.IsPrivileged())) + if s.ReadWriteTmpfs != nil { + options = append(options, libpod.WithReadWriteTmpfs(*s.ReadWriteTmpfs)) + } // Get namespace related options namespaceOpts, err := namespaceOptions(s, rt, pod, imageData) @@ -588,7 +594,11 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithShmSizeSystemd(*s.ShmSizeSystemd)) } if s.Rootfs != "" { - options = append(options, libpod.WithRootFS(s.Rootfs, s.RootfsOverlay, s.RootfsMapping)) + rootfsOverlay := false + if s.RootfsOverlay != nil { + rootfsOverlay = *s.RootfsOverlay + } + options = append(options, libpod.WithRootFS(s.Rootfs, rootfsOverlay, s.RootfsMapping)) } // Default used if not overridden on command line diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go index 
f10fc21c68..c9361380a9 100644 --- a/pkg/specgen/generate/kube/kube.go +++ b/pkg/specgen/generate/kube/kube.go @@ -182,6 +182,8 @@ type CtrSpecGenOptions struct { } func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) { + localTrue := true + s := specgen.NewSpecGenerator(opts.Container.Image, false) rtc, err := config.Default() @@ -211,7 +213,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener s.Name = fmt.Sprintf("%s-%s", opts.PodName, opts.Container.Name) - s.Terminal = opts.Container.TTY + s.Terminal = &opts.Container.TTY s.Pod = opts.PodID @@ -390,7 +392,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if label, ok := opts.Annotations[define.InspectAnnotationLabel+"/"+opts.Container.Name]; ok { if label == "nested" { - s.ContainerSecurityConfig.LabelNested = true + s.ContainerSecurityConfig.LabelNested = &localTrue } if !slices.Contains(s.ContainerSecurityConfig.SelinuxOpts, label) { s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, label) @@ -403,7 +405,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if err != nil { return nil, err } - s.Remove = autoremoveAsBool + s.Remove = &autoremoveAsBool s.Annotations[define.InspectAnnotationAutoremove] = autoremove } @@ -413,7 +415,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener return nil, err } - s.Init = initAsBool + s.Init = &initAsBool s.Annotations[define.InspectAnnotationInit] = init } @@ -423,7 +425,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if err != nil { return nil, err } - s.PublishExposedPorts = publishAllAsBool + s.PublishExposedPorts = &publishAllAsBool } s.Annotations[define.InspectAnnotationPublishAll] = publishAll @@ -589,10 +591,12 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener } if ro := opts.ReadOnly; ro != itypes.OptionalBoolUndefined { - s.ReadOnlyFilesystem = ro == itypes.OptionalBoolTrue + localRO := ro == itypes.OptionalBoolTrue + s.ReadOnlyFilesystem = &localRO } // This should default to true for kubernetes yaml - s.ReadWriteTmpfs = true + + s.ReadWriteTmpfs = &localTrue // Make sure the container runs in a systemd unit which is // stored as a label at container creation. @@ -819,14 +823,15 @@ func setupSecurityContext(s *specgen.SpecGenerator, securityContext *v1.Security } if securityContext.ReadOnlyRootFilesystem != nil { - s.ReadOnlyFilesystem = *securityContext.ReadOnlyRootFilesystem + s.ReadOnlyFilesystem = securityContext.ReadOnlyRootFilesystem } if securityContext.Privileged != nil { - s.Privileged = *securityContext.Privileged + s.Privileged = securityContext.Privileged } if securityContext.AllowPrivilegeEscalation != nil { - s.NoNewPrivileges = !*securityContext.AllowPrivilegeEscalation + localNNP := !*securityContext.AllowPrivilegeEscalation + s.NoNewPrivileges = &localNNP } if securityContext.ProcMount != nil && *securityContext.ProcMount == v1.UnmaskedProcMount { diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go index bc93091d17..fc9022b19a 100644 --- a/pkg/specgen/generate/namespaces.go +++ b/pkg/specgen/generate/namespaces.go @@ -367,7 +367,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod. 
toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, "bridge", s.Networks)) } - if s.UseImageHosts { + if s.UseImageHosts != nil && *s.UseImageHosts { toReturn = append(toReturn, libpod.WithUseImageHosts()) } else if len(s.HostAdd) > 0 { toReturn = append(toReturn, libpod.WithHosts(s.HostAdd)) @@ -375,7 +375,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod. if len(s.DNSSearch) > 0 { toReturn = append(toReturn, libpod.WithDNSSearch(s.DNSSearch)) } - if s.UseImageResolvConf { + if s.UseImageResolvConf != nil && *s.UseImageResolvConf { toReturn = append(toReturn, libpod.WithUseImageResolvConf()) } else if len(s.DNSServers) > 0 { var dnsServers []string diff --git a/pkg/specgen/generate/namespaces_linux.go b/pkg/specgen/generate/namespaces_linux.go index 7c8aed85d6..ecfc6b87e8 100644 --- a/pkg/specgen/generate/namespaces_linux.go +++ b/pkg/specgen/generate/namespaces_linux.go @@ -152,7 +152,7 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt if g.Config.Annotations == nil { g.Config.Annotations = make(map[string]string) } - if s.PublishExposedPorts { + if s.PublishExposedPorts != nil && *s.PublishExposedPorts { g.Config.Annotations[define.InspectAnnotationPublishAll] = define.InspectResponseTrue } diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go index 07fa1a8709..8a7d606c3c 100644 --- a/pkg/specgen/generate/oci.go +++ b/pkg/specgen/generate/oci.go @@ -50,7 +50,7 @@ func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData) []stri finalCommand = []string{""} } - if s.Init { + if s.Init != nil && *s.Init { // bind mount for this binary is added in addContainerInitBinary() finalCommand = append([]string{define.ContainerInitPath, "--"}, finalCommand...) 
} diff --git a/pkg/specgen/generate/oci_freebsd.go b/pkg/specgen/generate/oci_freebsd.go index feede3ed8a..86c35b27b7 100644 --- a/pkg/specgen/generate/oci_freebsd.go +++ b/pkg/specgen/generate/oci_freebsd.go @@ -43,7 +43,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt g.SetProcessArgs(finalCmd) - g.SetProcessTerminal(s.Terminal) + if s.Terminal != nil { + g.SetProcessTerminal(*s.Terminal) + } for key, val := range s.Annotations { g.AddAnnotation(key, val) @@ -51,7 +53,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt // Devices var userDevices []spec.LinuxDevice - if !s.Privileged { + if !s.IsPrivileged() { // add default devices from containers.conf for _, device := range rtc.Containers.Devices.Get() { if err = DevicesFromPath(&g, device); err != nil { @@ -145,7 +147,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt configSpec.Annotations = make(map[string]string) } - if s.Remove { + if s.Remove != nil && *s.Remove { configSpec.Annotations[define.InspectAnnotationAutoremove] = define.InspectResponseTrue } @@ -153,11 +155,11 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt configSpec.Annotations[define.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",") } - if s.Privileged { + if s.IsPrivileged() { configSpec.Annotations[define.InspectAnnotationPrivileged] = define.InspectResponseTrue } - if s.Init { + if s.Init != nil && *s.Init { configSpec.Annotations[define.InspectAnnotationInit] = define.InspectResponseTrue } diff --git a/pkg/specgen/generate/oci_linux.go b/pkg/specgen/generate/oci_linux.go index 8c926f1b69..adcc5a73a4 100644 --- a/pkg/specgen/generate/oci_linux.go +++ b/pkg/specgen/generate/oci_linux.go @@ -100,7 +100,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt canMountSys := canMountSys(isRootless, isNewUserns, s) - if s.Privileged && canMountSys { + if s.IsPrivileged() && canMountSys { cgroupPerm = "rw" g.RemoveMount("/sys") sysMnt := spec.Mount{ @@ -115,7 +115,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt addCgroup = false g.RemoveMount("/sys") r := "ro" - if s.Privileged { + if s.IsPrivileged() { r = "rw" } sysMnt := spec.Mount{ @@ -134,7 +134,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt Options: []string{"rprivate", "nosuid", "noexec", "nodev", r}, } g.AddMount(sysFsCgroupMnt) - if !s.Privileged && isRootless { + if !s.IsPrivileged() && isRootless { g.AddLinuxMaskedPaths("/sys/kernel") } } @@ -211,7 +211,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt g.SetProcessArgs(finalCmd) - g.SetProcessTerminal(s.Terminal) + if s.Terminal != nil { + g.SetProcessTerminal(*s.Terminal) + } for key, val := range s.Annotations { g.AddAnnotation(key, val) @@ -247,13 +249,13 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt // Devices // set the default rule at the beginning of device configuration - if !inUserNS && !s.Privileged { + if !inUserNS && !s.IsPrivileged() { g.AddLinuxResourcesDevice(false, "", nil, nil, "rwm") } var userDevices []spec.LinuxDevice - if !s.Privileged { + if !s.IsPrivileged() { // add default devices from containers.conf for _, device := range rtc.Containers.Devices.Get() { if err = DevicesFromPath(&g, device); err != nil { @@ -278,13 +280,13 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt 
*libpod.Runt if isRootless && len(s.DeviceCgroupRule) > 0 { return nil, fmt.Errorf("device cgroup rules are not supported in rootless mode or in a user namespace") } - if !isRootless && !s.Privileged { + if !isRootless && !s.IsPrivileged() { for _, dev := range s.DeviceCgroupRule { g.AddLinuxResourcesDevice(true, dev.Type, dev.Major, dev.Minor, dev.Access) } } - BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), s.Mask, s.Unmask, &g) + BlockAccessToKernelFilesystems(s.IsPrivileged(), s.PidNS.IsHost(), s.Mask, s.Unmask, &g) g.ClearProcessEnv() for name, val := range s.Env { @@ -315,7 +317,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt configSpec.Annotations = make(map[string]string) } - if s.Remove { + if s.Remove != nil && *s.Remove { configSpec.Annotations[define.InspectAnnotationAutoremove] = define.InspectResponseTrue } @@ -323,11 +325,11 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt configSpec.Annotations[define.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",") } - if s.Privileged { + if s.IsPrivileged() { configSpec.Annotations[define.InspectAnnotationPrivileged] = define.InspectResponseTrue } - if s.Init { + if s.Init != nil && *s.Init { configSpec.Annotations[define.InspectAnnotationInit] = define.InspectResponseTrue } @@ -336,7 +338,15 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt } setProcOpts(s, &g) - if s.ReadOnlyFilesystem && !s.ReadWriteTmpfs { + roFS := false + if s.ReadOnlyFilesystem != nil { + roFS = *s.ReadOnlyFilesystem + } + rwTmpfs := false + if s.ReadWriteTmpfs != nil { + rwTmpfs = *s.ReadWriteTmpfs + } + if roFS && !rwTmpfs { setDevOptsReadOnly(&g) } diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go index 3d72574e5d..c502134ded 100644 --- a/pkg/specgen/generate/pod_create.go +++ b/pkg/specgen/generate/pod_create.go @@ -250,7 +250,8 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { spec.DNSSearch = p.DNSSearch } if p.NoManageResolvConf { - spec.UseImageResolvConf = true + localTrue := true + spec.UseImageResolvConf = &localTrue } if len(p.Networks) > 0 { spec.Networks = p.Networks @@ -260,7 +261,7 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { spec.CNINetworks = p.CNINetworks } if p.NoManageHosts { - spec.UseImageHosts = p.NoManageHosts + spec.UseImageHosts = &p.NoManageHosts } if len(p.InfraConmonPidFile) > 0 { diff --git a/pkg/specgen/generate/pod_create_test.go b/pkg/specgen/generate/pod_create_test.go index 87c00030a4..54c3de88ed 100644 --- a/pkg/specgen/generate/pod_create_test.go +++ b/pkg/specgen/generate/pod_create_test.go @@ -8,6 +8,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/podman/v4/pkg/specgen" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "testing" ) @@ -54,9 +55,11 @@ func TestMapSpecCopyPodSpecToInfraContainerSpec(t *testing.T) { assert.Equal(t, dnsServers, mappedSpec.DNSServers) assert.Equal(t, dnsOptions, mappedSpec.DNSOptions) assert.Equal(t, dnsSearch, mappedSpec.DNSSearch) - assert.True(t, mappedSpec.UseImageResolvConf) + require.NotNil(t, mappedSpec.UseImageResolvConf) + assert.True(t, *mappedSpec.UseImageResolvConf) assert.Equal(t, networks, mappedSpec.Networks) - assert.True(t, mappedSpec.UseImageHosts) + require.NotNil(t, mappedSpec.UseImageHosts) + assert.True(t, *mappedSpec.UseImageHosts) assert.Equal(t, conmonPidFile, 
mappedSpec.ConmonPidFile) assert.Equal(t, infraImage, mappedSpec.Image) } diff --git a/pkg/specgen/generate/ports.go b/pkg/specgen/generate/ports.go index 3284d9181e..22b230ab12 100644 --- a/pkg/specgen/generate/ports.go +++ b/pkg/specgen/generate/ports.go @@ -365,7 +365,7 @@ func createPortMappings(s *specgen.SpecGenerator, imageData *libimage.ImageData) } publishPorts := toExpose - if !s.PublishExposedPorts { + if s.PublishExposedPorts == nil || !*s.PublishExposedPorts { publishPorts = nil } diff --git a/pkg/specgen/generate/security_freebsd.go b/pkg/specgen/generate/security_freebsd.go index 2df1c759e2..4a7b8542c7 100644 --- a/pkg/specgen/generate/security_freebsd.go +++ b/pkg/specgen/generate/security_freebsd.go @@ -18,7 +18,7 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, newImage *libimage.Image, rtc *config.Config) error { // If this is a privileged container, change the devfs ruleset to expose all devices. - if s.Privileged { + if s.IsPrivileged() { for k, m := range g.Config.Mounts { if m.Type == "devfs" { m.Options = []string{ @@ -29,7 +29,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, } } - g.SetRootReadonly(s.ReadOnlyFilesystem) + if s.ReadOnlyFilesystem != nil { + g.SetRootReadonly(*s.ReadOnlyFilesystem) + } return nil } diff --git a/pkg/specgen/generate/security_linux.go b/pkg/specgen/generate/security_linux.go index ff288285f1..b81d2ac373 100644 --- a/pkg/specgen/generate/security_linux.go +++ b/pkg/specgen/generate/security_linux.go @@ -23,7 +23,7 @@ import ( // setLabelOpts sets the label options of the SecurityConfig according to the // input. func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig specgen.Namespace, ipcConfig specgen.Namespace) error { - if !runtime.EnableLabeling() || s.Privileged { + if !runtime.EnableLabeling() || s.IsPrivileged() { s.SelinuxOpts = label.DisableSecOpt() return nil } @@ -70,7 +70,7 @@ func setupApparmor(s *specgen.SpecGenerator, rtc *config.Config, g *generate.Gen return nil } // If privileged and caller did not specify apparmor profiles return - if s.Privileged && !hasProfile { + if s.IsPrivileged() && !hasProfile { return nil } if !hasProfile { @@ -90,7 +90,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, ) // HANDLE CAPABILITIES // NOTE: Must happen before SECCOMP - if s.Privileged { + if s.IsPrivileged() { g.SetupPrivileged(true) caplist, err = capabilities.BoundingSet() if err != nil { @@ -127,9 +127,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, capsRequiredRequested = strings.Split(val, ",") } } - if !s.Privileged && len(capsRequiredRequested) == 1 && capsRequiredRequested[0] == "" { + if !s.IsPrivileged() && len(capsRequiredRequested) == 1 && capsRequiredRequested[0] == "" { caplist = []string{} - } else if !s.Privileged && len(capsRequiredRequested) > 0 { + } else if !s.IsPrivileged() && len(capsRequiredRequested) > 0 { // Pass capRequiredRequested in CapAdd field to normalize capabilities names capsRequired, err := capabilities.MergeCapabilities(nil, capsRequiredRequested, nil) if err != nil { @@ -192,7 +192,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, } } - g.SetProcessNoNewPrivileges(s.NoNewPrivileges) + if s.NoNewPrivileges != nil { + g.SetProcessNoNewPrivileges(*s.NoNewPrivileges) + } if err := setupApparmor(s, rtc, g); err != nil 
{ return err @@ -209,11 +211,13 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, // Clear default Seccomp profile from Generator for unconfined containers // and privileged containers which do not specify a seccomp profile. - if s.SeccompProfilePath == "unconfined" || (s.Privileged && (s.SeccompProfilePath == "" || s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) { + if s.SeccompProfilePath == "unconfined" || (s.IsPrivileged() && (s.SeccompProfilePath == "" || s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) { configSpec.Linux.Seccomp = nil } - g.SetRootReadonly(s.ReadOnlyFilesystem) + if s.ReadOnlyFilesystem != nil { + g.SetRootReadonly(*s.ReadOnlyFilesystem) + } noUseIPC := s.IpcNS.NSMode == specgen.FromContainer || s.IpcNS.NSMode == specgen.FromPod || s.IpcNS.NSMode == specgen.Host noUseNet := s.NetNS.NSMode == specgen.FromContainer || s.NetNS.NSMode == specgen.FromPod || s.NetNS.NSMode == specgen.Host diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go index 8c65783b43..d4b0d4563d 100644 --- a/pkg/specgen/generate/storage.go +++ b/pkg/specgen/generate/storage.go @@ -128,7 +128,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru } // If requested, add container init binary - if s.Init { + if s.Init != nil && *s.Init { initPath := s.InitPath if initPath == "" { initPath, err = rtc.FindInitBinary() @@ -180,7 +180,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru } } - if s.ReadWriteTmpfs { + if s.ReadWriteTmpfs != nil && *s.ReadWriteTmpfs { runPath, err := imageRunPath(ctx, img) if err != nil { return nil, nil, nil, err diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go index b65e1c0836..fab854a51a 100644 --- a/pkg/specgen/specgen.go +++ b/pkg/specgen/specgen.go @@ -52,20 +52,21 @@ type ContainerBasicConfig struct { Command []string `json:"command,omitempty"` // EnvHost indicates that the host environment should be added to container // Optional. - EnvHost bool `json:"env_host,omitempty"` + EnvHost *bool `json:"env_host,omitempty"` // EnvHTTPProxy indicates that the http host proxy environment variables // should be added to container // Optional. - HTTPProxy bool `json:"httpproxy,omitempty"` + HTTPProxy *bool `json:"httpproxy,omitempty"` // Env is a set of environment variables that will be set in the // container. // Optional. Env map[string]string `json:"env,omitempty"` // Terminal is whether the container will create a PTY. // Optional. - Terminal bool `json:"terminal,omitempty"` + Terminal *bool `json:"terminal,omitempty"` // Stdin is whether the container will keep its STDIN open. - Stdin bool `json:"stdin,omitempty"` + // Optional. + Stdin *bool `json:"stdin,omitempty"` // Labels are key-value pairs that are used to add metadata to // containers. // Optional. @@ -90,6 +91,7 @@ type ContainerBasicConfig struct { // Timeout is a maximum time in seconds the container will run before // main process is sent SIGKILL. // If 0 is used, signal will not be sent. Container can run indefinitely + // if they do not stop after the default termination signal. // Optional. Timeout uint `json:"timeout,omitempty"` // LogConfiguration describes the logging for a container including @@ -101,15 +103,6 @@ type ContainerBasicConfig struct { // If not given, a default location will be used. // Optional. 
ConmonPidFile string `json:"conmon_pid_file,omitempty"` - // RawImageName is the user-specified and unprocessed input referring - // to a local or a remote image. - RawImageName string `json:"raw_image_name,omitempty"` - // ImageOS is the user-specified image OS - ImageOS string `json:"image_os,omitempty"` - // ImageArch is the user-specified image architecture - ImageArch string `json:"image_arch,omitempty"` - // ImageVariant is the user-specified image variant - ImageVariant string `json:"image_variant,omitempty"` // RestartPolicy is the container's restart policy - an action which // will be taken when the container exits. // If not given, the default policy, which does nothing, will be used. @@ -140,10 +133,8 @@ type ContainerBasicConfig struct { // "container" - let the OCI runtime deal with it, advertise conmon's MAINPID // "conmon-only" - advertise conmon's MAINPID, send READY when started, don't pass to OCI // "ignore" - unset NOTIFY_SOCKET - SdNotifyMode string `json:"sdnotifyMode,omitempty"` - // Namespace is the libpod namespace the container will be placed in. // Optional. - Namespace string `json:"namespace,omitempty"` + SdNotifyMode string `json:"sdnotifyMode,omitempty"` // PidNS is the container's PID namespace. // It defaults to private. // Mandatory. @@ -165,8 +156,9 @@ type ContainerBasicConfig struct { // Sysctl sets kernel parameters for the container Sysctl map[string]string `json:"sysctl,omitempty"` // Remove indicates if the container should be removed once it has been started - // and exits - Remove bool `json:"remove,omitempty"` + // and exits. + // Optional. + Remove *bool `json:"remove,omitempty"` // ContainerCreateCommand is the command that was used to create this // container. // This will be shown in the output of Inspect() on the container, and @@ -194,23 +186,26 @@ type ContainerBasicConfig struct { // container. Dependencies can be specified by name or full/partial ID. // Optional. DependencyContainers []string `json:"dependencyContainers,omitempty"` - // PidFile is the file that saves container process id. - // set tags as `json:"-"` for not supported remote + // PidFile is the file that saves container's PID. + // Not supported for remote clients, so not serialized in specgen JSON. // Optional. PidFile string `json:"-"` // EnvSecrets are secrets that will be set as environment variables // Optional. EnvSecrets map[string]string `json:"secret_env,omitempty"` // InitContainerType describes if this container is an init container - // and if so, what type: always or once + // and if so, what type: always or once. + // Optional. InitContainerType string `json:"init_container_type"` // Personality allows users to configure different execution domains. // Execution domains tell Linux how to map signal numbers into signal actions. // The execution domain system allows Linux to provide limited support // for binaries compiled under other UNIX-like operating systems. + // Optional. Personality *spec.LinuxPersonality `json:"personality,omitempty"` // EnvMerge takes the specified environment variables from image and preprocess them before injecting them into the // container. + // Optional. EnvMerge []string `json:"envmerge,omitempty"` // UnsetEnv unsets the specified default environment variables from the image or from buildin or containers.conf // Optional. 
@@ -218,12 +213,14 @@ type ContainerBasicConfig struct { // UnsetEnvAll unsetall default environment variables from the image or from buildin or containers.conf // UnsetEnvAll unsets all default environment variables from the image or from buildin // Optional. - UnsetEnvAll bool `json:"unsetenvall,omitempty"` + UnsetEnvAll *bool `json:"unsetenvall,omitempty"` // Passwd is a container run option that determines if we are validating users/groups before running the container Passwd *bool `json:"manage_password,omitempty"` - // PasswdEntry specifies arbitrary data to append to a file. + // PasswdEntry specifies an arbitrary string to append to the container's /etc/passwd file. + // Optional. PasswdEntry string `json:"passwd_entry,omitempty"` - // GroupEntry specifies arbitrary data to append to a file. + // GroupEntry specifies an arbitrary string to append to the container's /etc/group file. + // Optional. GroupEntry string `json:"group_entry,omitempty"` } @@ -236,15 +233,33 @@ type ContainerStorageConfig struct { // Conflicts with Rootfs. // At least one of Image or Rootfs must be specified. Image string `json:"image"` + // RawImageName is the user-specified and unprocessed input referring + // to a local or a remote image. + // Optional, but strongly encouraged to be set if Image is set. + RawImageName string `json:"raw_image_name,omitempty"` + // ImageOS is the user-specified OS of the image. + // Used to select a different variant from a manifest list. + // Optional. + ImageOS string `json:"image_os,omitempty"` + // ImageArch is the user-specified image architecture. + // Used to select a different variant from a manifest list. + // Optional. + ImageArch string `json:"image_arch,omitempty"` + // ImageVariant is the user-specified image variant. + // Used to select a different variant from a manifest list. + // Optional. + ImageVariant string `json:"image_variant,omitempty"` // Rootfs is the path to a directory that will be used as the // container's root filesystem. No modification will be made to the // directory, it will be directly mounted into the container as root. // Conflicts with Image. // At least one of Image or Rootfs must be specified. Rootfs string `json:"rootfs,omitempty"` - // RootfsOverlay tells if rootfs is actually an overlay on top of base path - RootfsOverlay bool `json:"rootfs_overlay,omitempty"` - // RootfsMapping specifies if there are mappings to apply to the rootfs. + // RootfsOverlay tells if rootfs is actually an overlay on top of base path. + // Optional. + RootfsOverlay *bool `json:"rootfs_overlay,omitempty"` + // RootfsMapping specifies if there are UID/GID mappings to apply to the rootfs. + // Optional. RootfsMapping *string `json:"rootfs_mapping,omitempty"` // ImageVolumeMode indicates how image volumes will be created. // Supported modes are "ignore" (do not create), "tmpfs" (create as @@ -257,10 +272,12 @@ type ContainerStorageConfig struct { // may optionally be followed by a : and then one or more // comma-separated options. Valid options are 'ro', 'rw', and 'z'. // Options will be used for all volumes sourced from the container. + // Optional. VolumesFrom []string `json:"volumes_from,omitempty"` // Init specifies that an init binary will be mounted into the // container, and will be used as PID1. - Init bool `json:"init,omitempty"` + // Optional. + Init *bool `json:"init,omitempty"` // InitPath specifies the path to the init binary that will be added if // Init is specified above. If not specified, the default set in the // Libpod config will be used. 
Ignored if Init above is not set. @@ -288,7 +305,8 @@ type ContainerStorageConfig struct { // DeviceCgroupRule are device cgroup rules that allow containers // to use additional types of devices. DeviceCgroupRule []spec.LinuxDeviceCgroup `json:"device_cgroup_rule,omitempty"` - // DevicesFrom is a way to ensure your container inherits device specific information from another container + // DevicesFrom specifies that this container will mount the device(s) from other container(s). + // Optional. DevicesFrom []string `json:"devices_from,omitempty"` // HostDeviceList is used to recreate the mounted device on inherited containers HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"` @@ -312,7 +330,7 @@ type ContainerStorageConfig struct { // Create the working directory if it doesn't exist. // If unset, it doesn't create it. // Optional. - CreateWorkingDir bool `json:"create_working_dir,omitempty"` + CreateWorkingDir *bool `json:"create_working_dir,omitempty"` // StorageOpts is the container's storage options // Optional. StorageOpts map[string]string `json:"storage_opts,omitempty"` @@ -325,10 +343,12 @@ type ContainerStorageConfig struct { Secrets []Secret `json:"secrets,omitempty"` // Volatile specifies whether the container storage can be optimized // at the cost of not syncing all the dirty files in memory. - Volatile bool `json:"volatile,omitempty"` + // Optional. + Volatile *bool `json:"volatile,omitempty"` // ChrootDirs is an additional set of directories that need to be // treated as root directories. Standard bind mounts will be mounted // into paths relative to these directories. + // Optional. ChrootDirs []string `json:"chroot_directories,omitempty"` } @@ -343,7 +363,8 @@ type ContainerSecurityConfig struct { // (Though SELinux can be manually re-enabled). // TODO: this conflicts with things. // TODO: this does more. - Privileged bool `json:"privileged,omitempty"` + // Optional. + Privileged *bool `json:"privileged,omitempty"` // User is the user the container will be run as. // Can be given as a UID or a username; if a username, it will be // resolved within the container, using the container's /etc/passwd. @@ -382,7 +403,8 @@ type ContainerSecurityConfig struct { // NoNewPrivileges is whether the container will set the no new // privileges flag on create, which disables gaining additional // privileges (e.g. via setuid) in the container. - NoNewPrivileges bool `json:"no_new_privileges,omitempty"` + // Optional. + NoNewPrivileges *bool `json:"no_new_privileges,omitempty"` // UserNS is the container's user namespace. // It defaults to host, indicating that no user namespace will be // created. @@ -394,15 +416,18 @@ type ContainerSecurityConfig struct { // Required if UserNS is private. IDMappings *types.IDMappingOptions `json:"idmappings,omitempty"` // ReadOnlyFilesystem indicates that everything will be mounted - // as read-only - ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"` + // as read-only. + // Optional. + ReadOnlyFilesystem *bool `json:"read_only_filesystem,omitempty"` // ReadWriteTmpfs indicates that when running with a ReadOnlyFilesystem - // mount temporary file systems - ReadWriteTmpfs bool `json:"read_write_tmpfs,omitempty"` + // mount temporary file systems. + // Optional. 
+ ReadWriteTmpfs *bool `json:"read_write_tmpfs,omitempty"` // LabelNested indicates whether or not the container is allowed to - // run fully nested containers including labelling - LabelNested bool `json:"label_nested,omitempty"` + // run fully nested containers including SELinux labelling. + // Optional. + LabelNested *bool `json:"label_nested,omitempty"` // Umask is the umask the init process of the container will be run with. Umask string `json:"umask,omitempty"` @@ -412,8 +437,10 @@ type ContainerSecurityConfig struct { // given in addition to the default list. // Optional Mask []string `json:"mask,omitempty"` - // Unmask is the path we want to unmask in the container. To override - // all the default paths that are masked, set unmask=ALL. + // Unmask a path in the container. Some paths are masked by default, + // preventing them from being accessed within the container; this undoes + // that masking. If ALL is passed, all paths will be unmasked. + // Optional. Unmask []string `json:"unmask,omitempty"` } @@ -424,8 +451,9 @@ type ContainerCgroupConfig struct { // It defaults to private. // Mandatory. CgroupNS Namespace `json:"cgroupns,omitempty"` - // CgroupsMode sets a policy for how cgroups will be created in the + // CgroupsMode sets a policy for how cgroups will be created for the // container, including the ability to disable creation entirely. + // Optional. CgroupsMode string `json:"cgroups_mode,omitempty"` // CgroupParent is the container's Cgroup parent. // If not set, the default for the current cgroup driver will be used. @@ -449,7 +477,8 @@ type ContainerNetworkConfig struct { // This is based on ports set in Expose below, and any ports specified // by the Image (if one is given). // Only available if NetNS is set to Bridge or Slirp. - PublishExposedPorts bool `json:"publish_image_ports,omitempty"` + // Optional. + PublishExposedPorts *bool `json:"publish_image_ports,omitempty"` // Expose is a number of ports that will be forwarded to the container // if PublishExposedPorts is set. // Expose is a map of uint16 (port number) to a string representing @@ -466,6 +495,7 @@ type ContainerNetworkConfig struct { // network interface name for this container on the specific network. // If the map is empty and the bridge network mode is set the container // will be joined to the default network. + // Optional. Networks map[string]nettypes.PerNetworkOptions // CNINetworks is a list of CNI networks to join the container to. // If this list is empty, the default CNI network will be joined @@ -478,7 +508,8 @@ type ContainerNetworkConfig struct { // UseImageResolvConf indicates that resolv.conf should not be managed // by Podman, but instead sourced from the image. // Conflicts with DNSServer, DNSSearch, DNSOption. - UseImageResolvConf bool `json:"use_image_resolve_conf,omitempty"` + // Optional. + UseImageResolvConf *bool `json:"use_image_resolve_conf,omitempty"` // DNSServers is a set of DNS servers that will be used in the // container's resolv.conf, replacing the host's DNS Servers which are // used by default. @@ -500,13 +531,8 @@ type ContainerNetworkConfig struct { // UseImageHosts indicates that /etc/hosts should not be managed by // Podman, and instead sourced from the image. // Conflicts with HostAdd. - // Do not set omitempty here, if this is false it should be set to not get - // the server default. - // Ideally this would be a pointer so we could differentiate between an - // explicitly false/true and unset (containers.conf default). 
However - // specgen is stable so we can not change this right now. - // TODO (5.0): change to pointer - UseImageHosts bool `json:"use_image_hosts"` + // Optional. + UseImageHosts *bool `json:"use_image_hosts,omitempty"` // BaseHostsFile is the path to a hosts file, the entries from this file // are added to the containers hosts file. As special value "image" is // allowed which uses the /etc/hosts file from within the image and "none" @@ -556,10 +582,6 @@ type ContainerResourceConfig struct { // that are used to configure cgroup v2. // Optional. CgroupConf map[string]string `json:"unified,omitempty"` - // CPU period of the cpuset, determined by --cpus - CPUPeriod uint64 `json:"cpu_period,omitempty"` - // CPU quota of the cpuset, determined by --cpus - CPUQuota int64 `json:"cpu_quota,omitempty"` } // ContainerHealthCheckConfig describes a container healthcheck with attributes @@ -590,6 +612,13 @@ type SpecGenerator struct { cacheLibImage } +func (s *SpecGenerator) IsPrivileged() bool { + if s.Privileged != nil { + return *s.Privileged + } + return false +} + func (s *SpecGenerator) IsInitContainer() bool { return len(s.InitContainerType) != 0 } @@ -623,7 +652,8 @@ func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator { if lastColonIndex != -1 { lastPart := csc.Rootfs[lastColonIndex+1:] if lastPart == "O" { - csc.RootfsOverlay = true + localTrue := true + csc.RootfsOverlay = &localTrue csc.Rootfs = csc.Rootfs[:lastColonIndex] } else if lastPart == "idmap" || strings.HasPrefix(lastPart, "idmap=") { csc.RootfsMapping = &lastPart diff --git a/pkg/specgen/specgen_test.go b/pkg/specgen/specgen_test.go index b11bd30748..4277564aff 100644 --- a/pkg/specgen/specgen_test.go +++ b/pkg/specgen/specgen_test.go @@ -9,18 +9,19 @@ import ( func TestNewSpecGeneratorWithRootfs(t *testing.T) { idmap := "idmap" idmapMappings := "idmap=uids=1-1-2000" + localTrue := true tests := []struct { rootfs string - expectedRootfsOverlay bool + expectedRootfsOverlay *bool expectedRootfs string expectedMapping *string }{ - {"/root/a:b:O", true, "/root/a:b", nil}, - {"/root/a:b/c:O", true, "/root/a:b/c", nil}, - {"/root/a:b/c:", false, "/root/a:b/c:", nil}, - {"/root/a/b", false, "/root/a/b", nil}, - {"/root/a:b/c:idmap", false, "/root/a:b/c", &idmap}, - {"/root/a:b/c:idmap=uids=1-1-2000", false, "/root/a:b/c", &idmapMappings}, + {"/root/a:b:O", &localTrue, "/root/a:b", nil}, + {"/root/a:b/c:O", &localTrue, "/root/a:b/c", nil}, + {"/root/a:b/c:", nil, "/root/a:b/c:", nil}, + {"/root/a/b", nil, "/root/a/b", nil}, + {"/root/a:b/c:idmap", nil, "/root/a:b/c", &idmap}, + {"/root/a:b/c:idmap=uids=1-1-2000", nil, "/root/a:b/c", &idmapMappings}, } for _, args := range tests { val := NewSpecGenerator(args.rootfs, true) diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go index e9e90cae43..633e576598 100644 --- a/pkg/specgenutil/specgen.go +++ b/pkg/specgenutil/specgen.go @@ -226,6 +226,7 @@ func setNamespaces(rtc *config.Config, s *specgen.SpecGenerator, c *entities.Con if ns, ok := os.LookupEnv("PODMAN_USERNS"); ok { userns = ns } else { + // TODO: This should be moved into pkg/specgen/generate so we don't use the client's containers.conf userns = rtc.Containers.UserNS } } @@ -326,6 +327,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions return err } + // TODO: This needs to move into pkg/specgen/generate so we aren't using containers.conf on the client. 
if rtc.Containers.EnableLabeledUsers { defSecurityOpts, err := currentLabelOpts() if err != nil { @@ -334,6 +336,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions c.SecurityOpt = append(defSecurityOpts, c.SecurityOpt...) } + // validate flags as needed if err := validate(c); err != nil { return err @@ -389,8 +392,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions return err } - if !s.Terminal { - s.Terminal = c.TTY + if s.Terminal == nil { + s.Terminal = &c.TTY } if err := verifyExpose(c.Expose); err != nil { @@ -401,8 +404,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if c.Net != nil { s.PortMappings = c.Net.PublishPorts } - if !s.PublishExposedPorts { - s.PublishExposedPorts = c.PublishAll + if s.PublishExposedPorts == nil { + s.PublishExposedPorts = &c.PublishAll } if len(s.Pod) == 0 || len(c.Pod) > 0 { @@ -450,12 +453,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // any case. osEnv := envLib.Map(os.Environ()) - if !s.EnvHost { - s.EnvHost = c.EnvHost + if s.EnvHost == nil { + s.EnvHost = &c.EnvHost } - if !s.HTTPProxy { - s.HTTPProxy = c.HTTPProxy + if s.HTTPProxy == nil { + s.HTTPProxy = &c.HTTPProxy } // env-file overrides any previous variables @@ -572,12 +575,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if c.Net != nil { s.HostAdd = c.Net.AddHosts - s.UseImageResolvConf = c.Net.UseImageResolvConf + s.UseImageResolvConf = &c.Net.UseImageResolvConf s.DNSServers = c.Net.DNSServers s.DNSSearch = c.Net.DNSSearch s.DNSOptions = c.Net.DNSOptions s.NetworkOptions = c.Net.NetworkOptions - s.UseImageHosts = c.Net.NoHosts + s.UseImageHosts = &c.Net.NoHosts } if len(s.HostUsers) == 0 || len(c.HostUsers) != 0 { s.HostUsers = c.HostUsers @@ -587,9 +590,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.ImageVolumeMode = c.ImageVolume } } - if len(s.ImageVolumeMode) == 0 { - s.ImageVolumeMode = rtc.Engine.ImageVolumeMode - } if s.ImageVolumeMode == define.TypeBind { s.ImageVolumeMode = "anonymous" } @@ -622,9 +622,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.CgroupsMode) == 0 { s.CgroupsMode = c.CgroupsMode } - if s.CgroupsMode == "" { - s.CgroupsMode = rtc.Cgroups() - } if len(s.Groups) == 0 || len(c.GroupAdd) != 0 { s.Groups = c.GroupAdd @@ -650,11 +647,11 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.CapDrop) == 0 || len(c.CapDrop) != 0 { s.CapDrop = c.CapDrop } - if !s.Privileged { - s.Privileged = c.Privileged + if s.Privileged == nil { + s.Privileged = &c.Privileged } - if !s.ReadOnlyFilesystem { - s.ReadOnlyFilesystem = c.ReadOnly + if s.ReadOnlyFilesystem == nil { + s.ReadOnlyFilesystem = &c.ReadOnly } if len(s.ConmonPidFile) == 0 || len(c.ConmonPIDFile) != 0 { s.ConmonPidFile = c.ConmonPIDFile @@ -667,7 +664,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // Only add ReadWrite tmpfs mounts iff the container is // being run ReadOnly and ReadWriteTmpFS is not disabled, // (user specifying --read-only-tmpfs=false.) - s.ReadWriteTmpfs = c.ReadOnly && c.ReadWriteTmpFS + localRWTmpfs := c.ReadOnly && c.ReadWriteTmpFS + s.ReadWriteTmpfs = &localRWTmpfs // TODO convert to map? 
// check if key=value and convert @@ -707,7 +705,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.Annotations[define.InspectAnnotationApparmor] = val case "label": if val == "nested" { - s.ContainerSecurityConfig.LabelNested = true + localTrue := true + s.ContainerSecurityConfig.LabelNested = &localTrue continue } // TODO selinux opts and label opts are the same thing @@ -739,7 +738,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions return fmt.Errorf("invalid --security-opt 2: %q", opt) } } - s.ContainerSecurityConfig.NoNewPrivileges = noNewPrivileges + s.ContainerSecurityConfig.NoNewPrivileges = &noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } @@ -766,7 +765,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.Volumes = volumes } - if s.ContainerSecurityConfig.LabelNested { + if s.LabelNested != nil && *s.LabelNested { // Need to unmask the SELinux file system s.Unmask = append(s.Unmask, "/sys/fs/selinux", "/proc") s.Mounts = append(s.Mounts, specs.Mount{ @@ -796,14 +795,14 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.DeviceCgroupRule = append(s.DeviceCgroupRule, dev) } - if !s.Init { - s.Init = c.Init + if s.Init == nil { + s.Init = &c.Init } if len(s.InitPath) == 0 || len(c.InitPath) != 0 { s.InitPath = c.InitPath } - if !s.Stdin { - s.Stdin = c.Interactive + if s.Stdin == nil { + s.Stdin = &c.Interactive } // quiet // DeviceCgroupRules: c.StringSlice("device-cgroup-rule"), @@ -877,8 +876,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.Personality.Domain = specs.LinuxPersonalityDomain(c.Personality) } - if !s.Remove { - s.Remove = c.Rm + if s.Remove == nil { + s.Remove = &c.Rm } if s.StopTimeout == nil || c.StopTimeout != 0 { s.StopTimeout = &c.StopTimeout @@ -895,8 +894,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.PidFile) == 0 || len(c.PidFile) != 0 { s.PidFile = c.PidFile } - if !s.Volatile { - s.Volatile = c.Rm + if s.Volatile == nil { + s.Volatile = &c.Rm } if len(s.EnvMerge) == 0 || len(c.EnvMerge) != 0 { s.EnvMerge = c.EnvMerge @@ -904,8 +903,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.UnsetEnv) == 0 || len(c.UnsetEnv) != 0 { s.UnsetEnv = c.UnsetEnv } - if !s.UnsetEnvAll { - s.UnsetEnvAll = c.UnsetEnvAll + if s.UnsetEnvAll == nil { + s.UnsetEnvAll = &c.UnsetEnvAll } if len(s.ChrootDirs) == 0 || len(c.ChrootDirs) != 0 { s.ChrootDirs = c.ChrootDirs diff --git a/pkg/specgenutil/volumes.go b/pkg/specgenutil/volumes.go index 5155e779fd..cae55f18b5 100644 --- a/pkg/specgenutil/volumes.go +++ b/pkg/specgenutil/volumes.go @@ -30,6 +30,7 @@ var ( // TODO: handle options parsing/processing via containers/storage/pkg/mount func parseVolumes(rtc *config.Config, volumeFlag, mountFlag, tmpfsFlag []string) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) { // Get mounts from the --mounts flag. + // TODO: The runtime config part of this needs to move into pkg/specgen/generate to avoid querying containers.conf on the client. 
unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag, rtc.Mounts()) if err != nil { return nil, nil, nil, nil, err diff --git a/test/system/160-volumes.bats b/test/system/160-volumes.bats index b7092cb14f..476d5b9872 100644 --- a/test/system/160-volumes.bats +++ b/test/system/160-volumes.bats @@ -532,7 +532,7 @@ EOF CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman run --rm volume_image stat -f -c %T /data is "$output" "tmpfs" "Should be tmpfs" - CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman run --image-volume bind --rm volume_image stat -f -c %T /data + CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman run --image-volume anonymous --rm volume_image stat -f -c %T /data assert "$output" != "tmpfs" "Should match hosts $fs" CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman run --image-volume tmpfs --rm volume_image stat -f -c %T /data From d202acd861406e8406eb675e40908a07b588cc19 Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Mon, 29 Jan 2024 16:30:55 -0500 Subject: [PATCH 2/2] Bump containers/common to latest main Signed-off-by: Matthew Heon --- go.mod | 24 +- go.sum | 58 ++- .../containers/common/libimage/layer_tree.go | 4 +- .../containers/common/pkg/config/config.go | 13 +- .../common/pkg/config/config_darwin.go | 3 - .../common/pkg/config/config_freebsd.go | 3 - .../common/pkg/config/config_linux.go | 3 - .../common/pkg/config/config_local.go | 8 + .../common/pkg/config/config_remote.go | 4 + .../common/pkg/config/containers.conf | 11 +- .../containers/common/pkg/config/default.go | 2 +- .../image/v5/docker/daemon/client.go | 7 +- .../internal/pkg/platform/platform_matcher.go | 30 +- .../image/v5/storage/storage_dest.go | 2 +- .../containers/image/v5/version/version.go | 2 +- .../github.com/containers/storage/.cirrus.yml | 6 +- vendor/github.com/containers/storage/VERSION | 2 +- .../containers/storage/drivers/driver.go | 18 + .../storage/drivers/overlay/composefs.go | 72 +-- .../storage/drivers/overlay/overlay.go | 48 +- .../storage/drivers/quota/projectquota.go | 448 ---------------- .../drivers/quota/projectquota_supported.go | 449 ++++++++++++++++ .../storage/pkg/chunked/dump/dump.go | 24 +- .../storage/pkg/chunked/storage_linux.go | 272 +++++++--- .../pkg/chunked/storage_unsupported.go | 3 +- .../storage/pkg/fsverity/fsverity_linux.go | 45 ++ .../pkg/fsverity/fsverity_unsupported.go | 21 + .../storage/pkg/homedir/homedir_others.go | 4 +- .../storage/pkg/homedir/homedir_unix.go | 7 +- .../storage/pkg/homedir/homedir_windows.go | 29 ++ vendor/github.com/containers/storage/store.go | 55 ++ .../client/client.go | 17 +- .../credentials/error.go | 11 +- .../github.com/felixge/httpsnoop/.travis.yml | 6 - vendor/github.com/felixge/httpsnoop/Makefile | 2 +- vendor/github.com/felixge/httpsnoop/README.md | 4 +- .../felixge/httpsnoop/capture_metrics.go | 2 +- .../httpsnoop/wrap_generated_gteq_1.8.go | 2 +- .../httpsnoop/wrap_generated_lt_1.8.go | 2 +- .../github.com/go-openapi/strfmt/default.go | 56 +- .../github.com/go-openapi/swag/BENCHMARK.md | 52 ++ .../go-openapi/swag/initialism_index.go | 137 +++++ .../github.com/go-openapi/swag/name_lexem.go | 72 +-- vendor/github.com/go-openapi/swag/split.go | 490 +++++++++++++----- .../go-openapi/swag/string_bytes.go | 22 + vendor/github.com/go-openapi/swag/util.go | 206 ++++---- vendor/github.com/go-openapi/swag/yaml.go | 35 +- .../github.com/klauspost/compress/README.md | 14 +- .../klauspost/compress/flate/deflate.go | 2 +- vendor/github.com/klauspost/compress/s2sx.mod | 2 +- 
.../klauspost/compress/zstd/decodeheader.go | 56 +- .../compress/zstd/encoder_options.go | 6 +- .../klauspost/compress/zstd/frameenc.go | 2 +- .../compress/zstd/fse_decoder_generic.go | 11 +- .../klauspost/compress/zstd/seqdec_amd64.s | 136 ++--- vendor/modules.txt | 27 +- 56 files changed, 1930 insertions(+), 1119 deletions(-) create mode 100644 vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go create mode 100644 vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go delete mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md create mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go diff --git a/go.mod b/go.mod index e6fccc0c1b..95d9519b93 100644 --- a/go.mod +++ b/go.mod @@ -11,14 +11,14 @@ require ( github.com/checkpoint-restore/go-criu/v7 v7.0.0 github.com/containernetworking/plugins v1.4.0 github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c - github.com/containers/common v0.57.1-0.20240124083822-167512e3cfc4 + github.com/containers/common v0.57.1-0.20240129201029-3310a75e3608 github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.7.2 - github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28 + github.com/containers/image/v5 v5.29.2-0.20240129204525-816800b5daf7 github.com/containers/libhvee v0.6.0 github.com/containers/ocicrypt v1.1.9 github.com/containers/psgo v1.8.0 - github.com/containers/storage v1.51.1-0.20231221151421-1020ab61b4e5 + github.com/containers/storage v1.52.1-0.20240129173630-7a525ce0e2bc github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 github.com/coreos/stream-metadata-go v0.4.4 github.com/crc-org/vfkit v0.5.0 @@ -65,7 +65,7 @@ require ( github.com/vbauerster/mpb/v8 v8.7.2 github.com/vishvananda/netlink v1.2.1-beta.2 go.etcd.io/bbolt v1.3.8 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/exp v0.0.0-20231226003508-02704c960a9b golang.org/x/net v0.20.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.16.0 @@ -101,13 +101,13 @@ require ( github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b // indirect github.com/coreos/go-oidc/v3 v3.9.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker-credential-helpers v0.8.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/docker/docker-credential-helpers v0.8.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fsouza/go-dockerclient v1.10.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -124,8 +124,8 @@ require ( github.com/go-openapi/loads v0.21.2 // indirect github.com/go-openapi/runtime v0.26.0 // indirect github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/strfmt v0.21.10 // indirect - github.com/go-openapi/swag v0.22.5 // indirect + github.com/go-openapi/strfmt v0.22.0 // indirect + 
github.com/go-openapi/swag v0.22.9 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect @@ -145,7 +145,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.5 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect @@ -184,7 +184,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sigstore/fulcio v1.4.3 // indirect github.com/sigstore/rekor v1.2.2 // indirect - github.com/sigstore/sigstore v1.8.0 // indirect + github.com/sigstore/sigstore v1.8.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/sylabs/sif/v2 v2.15.1 // indirect @@ -208,7 +208,7 @@ require ( golang.org/x/arch v0.5.0 // indirect golang.org/x/crypto v0.18.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.16.1 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index b196d9af5a..ee9c0f4ea1 100644 --- a/go.sum +++ b/go.sum @@ -257,14 +257,14 @@ github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7w github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0= github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c h1:E7nxvH3N3kpyson0waJv1X+eY9hAs+x2zQswsK+//yY= github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c/go.mod h1:oMNfVrZGEfWVOxXTNOYPMdZzDfSo2umURK/TO0d8TRk= -github.com/containers/common v0.57.1-0.20240124083822-167512e3cfc4 h1:gX7CDGPna9aj5J5UXLfzX8F0rwVHVWwAS6wXGATMMUc= -github.com/containers/common v0.57.1-0.20240124083822-167512e3cfc4/go.mod h1:3V+lxqRPM/HhF3Vizml1m698IAC08xCVM3waMWX4E/Q= +github.com/containers/common v0.57.1-0.20240129201029-3310a75e3608 h1:P+3HLEBeuFr/XcFtyZIS8uk5zdbcvtrHKCSzHlUUbdY= +github.com/containers/common v0.57.1-0.20240129201029-3310a75e3608/go.mod h1:Na7hGh5WnmB0RdGkKyb6JQb6DtKrs5qoIGrPucuR8t0= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/gvisor-tap-vsock v0.7.2 h1:6CyU5D85C0/DciRRd7W0bPljK4FAS+DPrrHEQMHfZKY= github.com/containers/gvisor-tap-vsock v0.7.2/go.mod h1:6NiTxh2GCVxZQLPzfuEB78/Osp2Usd9uf6nLdd6PiUY= -github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28 h1:dI4/9x4Oh8SWEKIP8KcwoCFUWDO8jHbbfLhaFr20R/Y= -github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28/go.mod h1:LC9m+8ED9+Vuw2WSd/mgvrHbi/44WJj/XBDNdiZC0AY= +github.com/containers/image/v5 v5.29.2-0.20240129204525-816800b5daf7 h1:xnnFpYdUjDn2MZYXmvcopGEvWVO/J0V8OexEnlnIGf0= +github.com/containers/image/v5 v5.29.2-0.20240129204525-816800b5daf7/go.mod h1:op1w+ftmdbE3UNW/6as8mruL1i5AMq6nH+btNOykMcU= github.com/containers/libhvee v0.6.0 h1:tUzwSz8R0GjR6IctgDnkTMjdtCk5Mxhpai4Vyv6UeF4= github.com/containers/libhvee v0.6.0/go.mod h1:f/q1wCdQqOLiK3IZqqBfOD7exMZYBU5pDYsrMa/pSFg= 
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -279,8 +279,8 @@ github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPN github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY= github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc= github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s= -github.com/containers/storage v1.51.1-0.20231221151421-1020ab61b4e5 h1:HzlHfy8C02EMrA9YUMUxAAgL9G1XCD0saMU9Lwg4Gfg= -github.com/containers/storage v1.51.1-0.20231221151421-1020ab61b4e5/go.mod h1:s37+wxZpGp2eh4vzBKZl9vil/s+j6KhcrdgdMoWZGCg= +github.com/containers/storage v1.52.1-0.20240129173630-7a525ce0e2bc h1:Cfv5hg2OtNZnIZLfOVmLjObX0DuJb+aqkTl+2+dNweA= +github.com/containers/storage v1.52.1-0.20240129173630-7a525ce0e2bc/go.mod h1:T/ZMocbhShnMLIF0pdkiLPwpkwlGlyUWJeSXnfC/uew= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -314,8 +314,8 @@ github.com/crc-org/vfkit v0.5.0/go.mod h1:OQiqOghCzdgkd/jRoVu4/lcfQSKje7XPVpfW1a github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= +github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -340,7 +340,7 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -350,8 +350,8 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r github.com/docker/docker v25.0.1+incompatible h1:k5TYd5rIVQRSqcTwCID+cyVA0yRg86+Pcrz1ls0/frA= 
github.com/docker/docker v25.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= -github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= +github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= @@ -382,8 +382,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -459,16 +460,16 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk= -github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis= +github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys= -github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/swag v0.22.9 
h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= @@ -638,8 +639,8 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -698,8 +699,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E= +github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -746,6 +747,7 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -963,7 +965,7 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= 
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -975,8 +977,8 @@ github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.8.0 h1:sSRWXv1JiDsK4T2wNWVYcvKCgxcSrhQ/QUJxsfCO4OM= -github.com/sigstore/sigstore v1.8.0/go.mod h1:l12B1gFlLIpBIVeqk/q1Lb+6YSOGNuN3xLExIjYH+qc= +github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo= +github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1192,8 +1194,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1275,8 +1277,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1607,7 +1609,7 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index b140e648c9..66829c46bb 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -149,7 +149,9 @@ func (t *layerTree) layersOf(image *Image) []*storage.Layer { var layers []*storage.Layer node := t.node(image.TopLayer()) for node != nil { - layers = append(layers, node.layer) + if node.layer != nil { + layers = append(layers, node.layer) + } node = node.parent } return layers diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 2e352db446..84deb00418 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -31,7 +31,7 @@ const ( bindirPrefix = "$BINDIR" ) -var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"} +var validImageVolumeModes = []string{"anonymous", "tmpfs", "ignore"} // ProxyEnv is a list of Proxy Environment variables var ProxyEnv = []string{ @@ -154,6 +154,13 @@ type ContainersConfig struct { // Deprecated: Do not use this field directly use conf.FindInitBinary() instead. InitPath string `toml:"init_path,omitempty"` + // InterfaceName tells container runtimes how to set interface names + // inside containers. + // The only valid value at the moment is "device" that indicates the + // interface name should be set as the network_interface name from + // the network config. + InterfaceName string `toml:"interface_name,omitempty"` + // IPCNS way to create a ipc namespace for the container IPCNS string `toml:"ipcns,omitempty"` @@ -814,6 +821,10 @@ func (c *ContainersConfig) Validate() error { return err } + if err := c.validateInterfaceName(); err != nil { + return err + } + if err := c.validateTZ(); err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 1b40e2bae7..6ff13c15db 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -14,9 +14,6 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "bind" ) // podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. 
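As an illustrative sketch (not part of the patch): the new interface_name option accepts only the empty string (keep the legacy ethX naming) or "device" (reuse network_interface from the network config). This mirrors the validateInterfaceName helper added for local configs in the next hunk; package placement and names here are for illustration only.

	package config // hypothetical placement, for illustration only

	import "fmt"

	// validateInterfaceName accepts only "" (legacy ethX naming) or "device".
	func validateInterfaceName(name string) error {
		if name == "" || name == "device" {
			return nil
		}
		return fmt.Errorf("invalid interface_name option %s", name)
	}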
diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go index 48bb0994ce..903f0b47ce 100644 --- a/vendor/github.com/containers/common/pkg/config/config_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -14,9 +14,6 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "nullfs" ) // podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go index f294402a3e..4ce5d03209 100644 --- a/vendor/github.com/containers/common/pkg/config/config_linux.go +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -17,9 +17,6 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "bind" ) func selinuxEnabled() bool { diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index aac90b2999..e9826d62ca 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -42,6 +42,14 @@ func (c *ContainersConfig) validateDevices() error { return nil } +func (c *ContainersConfig) validateInterfaceName() error { + if c.InterfaceName == "device" || c.InterfaceName == "" { + return nil + } + + return fmt.Errorf("invalid interface_name option %s", c.InterfaceName) +} + func (c *ContainersConfig) validateUlimits() error { for _, u := range c.DefaultUlimits.Get() { ul, err := units.ParseUlimit(u) diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go index 7ad7bc233c..f7b3d1a028 100644 --- a/vendor/github.com/containers/common/pkg/config/config_remote.go +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -20,6 +20,10 @@ func (c *ContainersConfig) validateDevices() error { return nil } +func (c *ContainersConfig) validateInterfaceName() error { + return nil +} + func (c *ContainersConfig) validateUlimits() error { return nil } diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 7a21f69ff9..c638d35b35 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -164,6 +164,13 @@ default_sysctls = [ # #ipcns = "shareable" +# Default way to set an interface name inside container. Defaults to legacy +# pattern of ethX, where X is a integer, when left undefined. +# Options are: +# "device" Uses the network_interface name from the network config as interface name. +# Falls back to the ethX pattern if the network_interface is not set. +#interface_name = "" + # keyring tells the container engine whether to create # a kernel keyring for use within the container. # @@ -341,7 +348,7 @@ default_sysctls = [ #] # The firewall driver to be used by netavark. -# The default is empty which means netavark will pick one accordingly. 
Current supported +# The default is empty which means netavark will pick one accordingly. Current supported # drivers are "iptables", "none" (no firewall rules will be created) and "firewalld" (firewalld is # experimental at the moment and not recommend outside of testing). In the future we are # planning to add support for a "nftables" driver. @@ -549,7 +556,7 @@ default_sysctls = [ #image_parallel_copies = 0 # Tells container engines how to handle the built-in image volumes. -# * bind: An anonymous named volume will be created and mounted +# * anonymous: An anonymous named volume will be created and mounted # into the container. # * tmpfs: The volume is mounted onto the container as a tmpfs, # which allows users to create content that disappears when diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 1d826851a3..caccace8a9 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -29,7 +29,7 @@ const ( _defaultTransport = "docker://" // _defaultImageVolumeMode is a mode to handle built-in image volumes. - _defaultImageVolumeMode = _typeBind + _defaultImageVolumeMode = "anonymous" // defaultInitName is the default name of the init binary defaultInitName = "catatonit" diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 2c245f54f9..354af2140f 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -9,11 +9,6 @@ import ( "github.com/docker/go-connections/tlsconfig" ) -const ( - // The default API version to be used in case none is explicitly specified - defaultAPIVersion = "1.22" -) - // NewDockerClient initializes a new API client based on the passed SystemContext. 
func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { host := dockerclient.DefaultDockerHost @@ -23,7 +18,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { opts := []dockerclient.Opt{ dockerclient.WithHost(host), - dockerclient.WithVersion(defaultAPIVersion), + dockerclient.WithAPIVersionNegotiation(), } // We conditionalize building the TLS configuration only to TLS sockets: diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 3ba0e4084f..94002d6d47 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) @@ -82,15 +83,40 @@ func getCPUVariantWindows(arch string) string { func getCPUVariantArm() string { variant, err := getCPUInfo("Cpu architecture") if err != nil { + logrus.Errorf("Couldn't get cpu architecture: %v", err) return "" } - // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286) switch strings.ToLower(variant) { case "8", "aarch64": variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" + case "7": + // handle RPi Zero variant mismatch due to wrong variant from kernel + // https://github.com/containerd/containerd/pull/4530 + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + // https://github.com/moby/moby/pull/36121#issuecomment-398328286 + model, err := getCPUInfo("model name") + if err != nil { + logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err) + return "" + } + // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection, + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35 + // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 ) + // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en , + // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en . + // The Linux "cpu architecture" is determined by a “memory model” feature. + // + // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S . 
+ if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + logrus.Debugf("Detected corner case, setting cpu variant to v6") + variant = "v6" + } else { + variant = "v7" + } case "6", "6tej": variant = "v6" case "5", "5t", "5te", "5tej": diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index bbbff6cf98..b2bc26fbf8 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -276,7 +276,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces blobInfo: srcInfo, } - differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { return private.UploadedBlob{}, err } diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 0a057ddf0c..00d67999dc 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 29 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 + VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "-dev" diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 43cfef2804..13bc20e7e9 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20231208t193858z-f39f38d13" + IMAGE_SUFFIX: "c20240102t155643z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -167,7 +167,7 @@ vendor_task: cross_task: container: - image: golang:1.19 + image: golang:1.20 build_script: make cross @@ -181,6 +181,6 @@ success_task: - vendor - cross container: - image: golang:1.19 + image: golang:1.20 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 6abf1916f7..d6b55473bd 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.51.1-dev +1.52.1-dev diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index f71ee69325..2c30df50ee 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -196,6 +196,8 @@ type DriverWithDifferOutput struct { BigData map[string][]byte TarSplit []byte TOCDigest digest.Digest + // RootDirMode is the mode of the root directory of the layer, if specified. + RootDirMode *os.FileMode // Artifacts is a collection of additional artifacts // generated by the differ that the storage driver can use. 
Artifacts map[string]interface{} @@ -212,10 +214,26 @@ const ( DifferOutputFormatFlat ) +type DifferFsVerity int + +const ( + // DifferFsVerityDisabled means no fs-verity is used + DifferFsVerityDisabled = iota + + // DifferFsVerityEnabled means fs-verity is used when supported + DifferFsVerityEnabled + + // DifferFsVerityRequired means fs-verity is required + DifferFsVerityRequired +) + // DifferOptions overrides how the differ work type DifferOptions struct { // Format defines the destination directory layout format Format DifferOutputFormat + + // UseFsVerity defines whether fs-verity is used + UseFsVerity DifferFsVerity } // Differ defines the interface for using a custom differ. diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index ed9287d6ad..baa9d7befe 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -7,15 +7,13 @@ import ( "encoding/binary" "errors" "fmt" - "io/fs" "os" "os/exec" "path/filepath" "sync" - "syscall" - "unsafe" "github.com/containers/storage/pkg/chunked/dump" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -34,72 +32,6 @@ func getComposeFsHelper() (string, error) { return composeFsHelperPath, composeFsHelperErr } -func enableVerity(description string, fd int) error { - enableArg := unix.FsverityEnableArg{ - Version: 1, - Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, - Block_size: 4096, - } - - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) - if e1 != 0 && !errors.Is(e1, unix.EEXIST) { - return fmt.Errorf("failed to enable verity for %q: %w", description, e1) - } - return nil -} - -type verityDigest struct { - Fsv unix.FsverityDigest - Buf [64]byte -} - -func measureVerity(description string, fd int) (string, error) { - var digest verityDigest - digest.Fsv.Size = 64 - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) - if e1 != 0 { - return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) - } - return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil -} - -func enableVerityRecursive(root string) (map[string]string, error) { - digests := make(map[string]string) - walkFn := func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.Type().IsRegular() { - return nil - } - - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - if err := enableVerity(path, int(f.Fd())); err != nil { - return err - } - - verity, err := measureVerity(path, int(f.Fd())) - if err != nil { - return err - } - - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - - digests[relPath] = verity - return nil - } - err := filepath.WalkDir(root, walkFn) - return digests, err -} - func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } @@ -151,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com return err } - if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) 
&& !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 8cc33e15f0..0492162833 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,8 @@ const ( lowerFile = "lower" maxDepth = 500 - tocArtifact = "toc" + tocArtifact = "toc" + fsVerityDigestsArtifact = "fs-verity-digests" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -2085,7 +2086,13 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App if err != nil { return graphdriver.DriverWithDifferOutput{}, err } - + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + if err := os.Chmod(applyDir, perms); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } } else { var err error applyDir, err = d.getDiffPath(id) @@ -2101,6 +2108,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App } if d.usingComposefs { differOptions.Format = graphdriver.DifferOutputFormatFlat + differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled } out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), @@ -2120,23 +2128,33 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri if filepath.Dir(stagingDirectory) != d.getStagingDir() { return fmt.Errorf("%q is not a staging directory", stagingDirectory) } - - if d.usingComposefs { - // FIXME: move this logic into the differ so we don't have to open - // the file twice. - verityDigests, err := enableVerityRecursive(stagingDirectory) - if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { - logrus.Warningf("%s", err) - } - toc := diffOutput.Artifacts[tocArtifact] - if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { - return err - } - } diffPath, err := d.getDiffPath(id) if err != nil { return err } + + // If the current layer doesn't set the mode for the parent, override it with the parent layer's mode. + if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" { + parentDiffPath, err := d.getDiffPath(parent) + if err != nil { + return err + } + parentSt, err := os.Stat(parentDiffPath) + if err != nil { + return err + } + if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil { + return err + } + } + + if d.usingComposefs { + toc := diffOutput.Artifacts[tocArtifact] + verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string) + if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { + return err + } + } if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { return err } diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 2be79698d3..081fb2b106 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -1,453 +1,5 @@ -//go:build linux && !exclude_disk_quota && cgo -// +build linux,!exclude_disk_quota,cgo - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. 
-// It currently supports the legacy XFS specific ioctls. -// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - package quota -/* -#include -#include -#include -#include -#include - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef FS_PROJ_QUOTA -#define FS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif -*/ -import "C" - -import ( - "errors" - "fmt" - "math" - "os" - "path" - "path/filepath" - "sync" - "syscall" - "unsafe" - - "github.com/containers/storage/pkg/directory" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const projectIDsAllocatedPerQuotaHome = 10000 - // BackingFsBlockDeviceLink is the name of a file that we place in // the home directory of a driver that uses this package. const BackingFsBlockDeviceLink = "backingFsBlockDev" - -// Quota limit params - currently we only control blocks hard limit and inodes -type Quota struct { - Size uint64 - Inodes uint64 -} - -// Control - Context to be used by storage driver (e.g. overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas *sync.Map - basePath string -} - -// Attempt to generate a unigue projectid. Multiple directories -// per file system can have quota and they need a group of unique -// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) -// unique projectids, based on the inode of the basepath. -func generateUniqueProjectID(path string) (uint32, error) { - fileinfo, err := os.Stat(path) - if err != nil { - return 0, err - } - stat, ok := fileinfo.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("not a syscall.Stat_t %s", path) - } - projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) - return uint32(projectID), nil -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the basePath directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects -// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects -// echo storage:100000 >> /etc/projid -// echo volumes:200000 >> /etc/projid -// xfs_quota -x -c 'project -s storage volumes' / -// -// In the example above, the storage directory project id will be used as a -// "start offset" and all containers will be assigned larger project ids -// (e.g. >= 100000). Then the volumes directory project id will be used as a -// "start offset" and all volumes will be assigned larger project ids -// (e.g. >= 200000). 
-// This is a way to prevent xfs_quota management from conflicting with -// containers/storage. - -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. -func NewControl(basePath string) (*Control, error) { - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - if minProjectID == 0 { - // Indicates the storage was never initialized - // Generate a unique range of Projectids for this basepath - minProjectID, err = generateUniqueProjectID(basePath) - if err != nil { - return nil, err - } - - } - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - Inodes: 0, - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: &sync.Map{}, - basePath: basePath, - } - - if err := q.setProjectQuota(minProjectID, quota); err != nil { - return nil, err - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID() - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas.Store(targetPath, projectID) - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) - return q.setProjectQuota(projectID, quota) -} - -// ClearQuota removes the map entry in the quotas map for targetPath. -// It does so to prevent the map leaking entries as directories are deleted. -func (q *Control) ClearQuota(targetPath string) { - q.quotas.Delete(targetPath) -} - -// setProjectQuota - set the quota for project id on xfs block device -func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.FS_PROJ_QUOTA - - if quota.Size > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - } - if quota.Inodes > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT - d.d_ino_hardlimit = C.__u64(quota.Inodes) - d.d_ino_softlimit = d.d_ino_hardlimit - } - - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - runQuotactl := func() syscall.Errno { - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - return errno - } - - errno := runQuotactl() - - // If the backingFsBlockDev does not exist any more then try to recreate it. 
- if errors.Is(errno, unix.ENOENT) { - if _, err := makeBackingFsDev(q.basePath); err != nil { - return fmt.Errorf( - "failed to recreate missing backingFsBlockDev %s for projid %d: %w", - q.backingFsBlockDev, projectID, err, - ) - } - - if errno := runQuotactl(); errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", - projectID, q.backingFsBlockDev, errno) - } - - } else if errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - quota.Inodes = uint64(d.d_ino_hardlimit) - return nil -} - -// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota -func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - usage.Size = int64(d.d_bcount) * 512 - usage.InodeCount = int64(d.d_icount) - - return nil -} - -func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { - var d C.fs_disk_quota_t - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - return d, fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return d, nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID() error { - files, err := os.ReadDir(q.basePath) - if 
err != nil { - return fmt.Errorf("read directory failed : %s", q.basePath) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(q.basePath, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas.Store(path, projid) - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir, errno := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) - backingFsBlockDevTmp := backingFsBlockDev + ".tmp" - // Re-create just in case someone copied the home directory over to a new device - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - if !errors.Is(err, unix.EEXIST) { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - // On EEXIST, try again after unlinking any potential leftover. - _ = unix.Unlink(backingFsBlockDevTmp) - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - } - if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { - return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) - } - - return backingFsBlockDev, nil -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go new file mode 100644 index 0000000000..b0623bdacc --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -0,0 +1,449 @@ +//go:build linux && !exclude_disk_quota && cgo +// +build linux,!exclude_disk_quota,cgo + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. 
+// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include +#include +#include +#include +#include + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef FS_PROJ_QUOTA +#define FS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" + +import ( + "errors" + "fmt" + "math" + "os" + "path" + "path/filepath" + "sync" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/directory" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const projectIDsAllocatedPerQuotaHome = 10000 + +// Quota limit params - currently we only control blocks hard limit and inodes +type Quota struct { + Size uint64 + Inodes uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas *sync.Map + basePath string +} + +// Attempt to generate a unigue projectid. Multiple directories +// per file system can have quota and they need a group of unique +// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) +// unique projectids, based on the inode of the basepath. +func generateUniqueProjectID(path string) (uint32, error) { + fileinfo, err := os.Stat(path) + if err != nil { + return 0, err + } + stat, ok := fileinfo.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("not a syscall.Stat_t %s", path) + } + projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) + return uint32(projectID), nil +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the basePath directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects +// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects +// echo storage:100000 >> /etc/projid +// echo volumes:200000 >> /etc/projid +// xfs_quota -x -c 'project -s storage volumes' / +// +// In the example above, the storage directory project id will be used as a +// "start offset" and all containers will be assigned larger project ids +// (e.g. >= 100000). Then the volumes directory project id will be used as a +// "start offset" and all volumes will be assigned larger project ids +// (e.g. >= 200000). +// This is a way to prevent xfs_quota management from conflicting with +// containers/storage. + +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. 
+func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: &sync.Map{}, + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID() + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas.Store(targetPath, projectID) + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. +func (q *Control) ClearQuota(targetPath string) { + q.quotas.Delete(targetPath) +} + +// setProjectQuota - set the quota for project id on xfs block device +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. 
+ if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) + if 
err != nil { + return fmt.Errorf("read directory failed : %s", q.basePath) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(q.basePath, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas.Store(path, projid) + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir, errno := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" + // Re-create just in case someone copied the home directory over to a new device + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. + _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index 5e569682a6..d3c105c4d5 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "path/filepath" "strings" "time" "unicode" @@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := strings.TrimRight(entry.Name, "/") - if path == "" { +func sanitizeName(name string) string { + path := filepath.Clean(name) + if path == "." 
{ path = "/" } else if path[0] != '/' { path = "/" + path } + return path +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := sanitizeName(entry.Name) if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { return err @@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri var payload string if entry.Linkname != "" { - payload = entry.Linkname - if entry.Type == internal.TypeLink && payload[0] != '/' { - payload = "/" + payload + if entry.Type == internal.TypeSymlink { + payload = entry.Linkname + } else { + payload = sanitizeName(entry.Linkname) } } else { if len(entry.Digest) > 10 { @@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, if e.Linkname == "" { continue } + if e.Type == internal.TypeSymlink { + continue + } links[e.Linkname] = links[e.Linkname] + 1 } - if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") { root := &internal.FileMetadata{ Name: "/", Type: internal.TypeDir, diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index f278628e8f..f3414720ca 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" @@ -46,6 +47,7 @@ const ( chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" tocKey = "toc" + fsVerityDigestsKey = "fs-verity-digests" fileTypeZstdChunked = iota fileTypeEstargz @@ -86,9 +88,18 @@ type chunkedDiffer struct { // the layer are trusted and should not be validated. skipValidation bool + // blobDigest is the digest of the whole compressed layer. It is used if + // convertToZstdChunked to validate a layer when it is converted since there + // is no TOC referenced by the manifest. 
+ blobDigest digest.Digest + blobSize int64 storeOpts *types.StoreOptions + + useFsVerity graphdriver.DifferFsVerity + fsVerityDigests map[string]string + fsVerityMutex sync.Mutex } var xattrsToIgnore = map[string]interface{}{ @@ -188,33 +199,7 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, return streams, errs, nil } -func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) { - var payload io.ReadCloser - var streams chan io.ReadCloser - var errs chan error - var err error - - chunksToRequest := []ImageSourceChunk{ - { - Offset: 0, - Length: uint64(blobSize), - }, - } - - streams, errs, err = iss.GetBlobAt(chunksToRequest) - if err != nil { - return nil, "", nil, err - } - select { - case p := <-streams: - payload = p - case err := <-errs: - return nil, "", nil, err - } - if payload == nil { - return nil, "", nil, errors.New("invalid stream returned") - } - +func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { return nil, "", nil, err @@ -235,10 +220,8 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour return nil, "", nil, err } - digester := digest.Canonical.Digester() - hash := digester.Hash() - - if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil { + convertedOutputDigester := digest.Canonical.Digester() + if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil { f.Close() return nil, "", nil, err } @@ -249,11 +232,12 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour is := seekableFile{ file: f, } - return &is, digester.Digest(), newAnnotations, nil + + return &is, convertedOutputDigester.Digest(), newAnnotations, nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
-func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { +func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { storeOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err @@ -273,10 +257,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } - return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) } -func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { +func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { if !parseBooleanPullOption(storeOpts, "convert_images", false) { return nil, errors.New("convert_images not configured") } @@ -287,6 +271,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize } return &chunkedDiffer{ + fsVerityDigests: make(map[string]string), + blobDigest: blobDigest, blobSize: blobSize, convertToZstdChunked: true, copyBuffer: makeCopyBuffer(), @@ -312,16 +298,17 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } @@ -341,15 +328,16 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -946,6 +934,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT return nil } +type recordFsVerityFunc func(string, *os.File) error + type destinationFile struct { digester digest.Digester dirfd int @@ -955,9 +945,10 @@ type destinationFile struct { options *archive.TarOptions skipValidation bool to io.Writer + recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, 
options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -984,15 +975,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar options: options, dirfd: dirfd, skipValidation: skipValidation, + recordFsVerity: recordFsVerity, }, nil } func (d *destinationFile) Close() (Err error) { defer func() { - err := d.file.Close() + var roFile *os.File + var err error + + if d.recordFsVerity != nil { + roFile, err = reopenFileReadOnly(d.file) + if err == nil { + defer roFile.Close() + } else if Err == nil { + Err = err + } + } + + err = d.file.Close() if Err == nil { Err = err } + + if Err == nil && roFile != nil { + Err = d.recordFsVerity(d.metadata.Name, roFile) + } }() if !d.skipValidation { @@ -1015,6 +1023,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) { close(errors) } +func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error { + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + return nil + } + // fsverity.EnableVerity doesn't return an error if fs-verity was already + // enabled on the file. + err := fsverity.EnableVerity(path, int(roFile.Fd())) + if err != nil { + if c.useFsVerity == graphdriver.DifferFsVerityRequired { + return err + } + + // If it is not required, ignore the error if the filesystem does not support it. + if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) { + return nil + } + } + verity, err := fsverity.MeasureVerity(path, int(roFile.Fd())) + if err != nil { + return err + } + + c.fsVerityMutex.Lock() + c.fsVerityDigests[path] = verity + c.fsVerityMutex.Unlock() + + return nil +} + func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { var destFile *destinationFile @@ -1102,7 +1139,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } filesToClose <- destFile } - destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) + recordFsVerity := c.recordFsVerity + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + recordFsVerity = nil + } + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity) if err != nil { Err = err goto exit @@ -1433,15 +1474,39 @@ type findAndCopyFileOptions struct { options *archive.TarOptions } +func reopenFileReadOnly(f *os.File) (*os.File, error) { + path := fmt.Sprintf("/proc/self/fd/%d", f.Fd()) + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { - return err - } + if dstFile == nil { + return nil } - return nil + err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false) + if err != nil { + dstFile.Close() + return err + } + var roFile *os.File + if c.useFsVerity != graphdriver.DifferFsVerityDisabled { + roFile, err = reopenFileReadOnly(dstFile) + } + dstFile.Close() + if err != nil { + return err + } + if roFile == nil { + 
return nil + } + + defer roFile.Close() + return c.recordFsVerity(r.Name, roFile) } found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) @@ -1498,6 +1563,43 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta return new, nil } +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + var payload io.ReadCloser + var streams chan io.ReadCloser + var errs chan error + var err error + + chunksToRequest := []ImageSourceChunk{ + { + Offset: 0, + Length: uint64(c.blobSize), + }, + } + + streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + if err != nil { + return "", err + } + select { + case p := <-streams: + payload = p + case err := <-errs: + return "", err + } + if payload == nil { + return "", errors.New("invalid stream returned") + } + + originalRawDigester := digest.Canonical.Digester() + + r := io.TeeReader(payload, originalRawDigester.Hash()) + + // copy the entire tarball and compute its digest + _, err = io.Copy(destination, r) + + return originalRawDigester.Digest(), err +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { defer c.layersCache.release() defer func() { @@ -1506,11 +1608,38 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } }() + c.useFsVerity = differOpts.UseFsVerity + // stream to use for reading the zstd:chunked or Estargz file. stream := c.stream if c.convertToZstdChunked { - fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream) + fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + blobFile := os.NewFile(uintptr(fd), "blob-file") + defer func() { + if blobFile != nil { + blobFile.Close() + } + }() + + // calculate the checksum before accessing the file. + compressedDigest, err := c.copyAllBlobToFile(blobFile) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + if compressedDigest != c.blobDigest { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest) + } + + if _, err := blobFile.Seek(0, io.SeekStart); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1518,6 +1647,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // need to keep it open until the entire file is processed. defer fileSource.Close() + // Close the file so that the file descriptor is released and the file is deleted. + blobFile.Close() + blobFile = nil + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -1530,6 +1663,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff c.fileType = fileTypeZstdChunked c.manifest = manifest c.tarSplit = tarSplit + // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. 
c.contentDigest = diffID c.tocOffset = tocOffset @@ -1737,6 +1871,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: + if r.Name == "" || r.Name == "." { + output.RootDirMode = &mode + } if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } @@ -1874,6 +2011,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } + output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests + return output, nil } @@ -1933,7 +2072,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i e.Chunks = make([]*internal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { - e.Chunks[j] = &entries[i+j] + // we need a copy here, otherwise we override the + // .Size later + copy := entries[i+j] + e.Chunks[j] = © e.EndOffset = entries[i+j].EndOffset } i += nChunks diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 8d3fcf2ba4..ac6bdfec7a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -9,9 +9,10 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" + digest "github.com/opencontainers/go-digest" ) // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. -func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { +func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { return nil, errors.New("format not supported on this system") } diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go new file mode 100644 index 0000000000..5b21c4b764 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go @@ -0,0 +1,45 @@ +package fsverity + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// verityDigest struct represents the digest used for verifying the integrity of a file. +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. +func EnableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. 
+func MeasureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go new file mode 100644 index 0000000000..7909c54fae --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go @@ -0,0 +1,21 @@ +//go:build !linux +// +build !linux + +package fsverity + +import ( + "fmt" +) + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. +func EnableVerity(description string, fd int) error { + return fmt.Errorf("fs-verity is not supported on this platform") +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + return fmt.Errorf("fs-verity is not supported on this platform") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index b02812e61b..9057fe1b2c 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,5 +1,5 @@ -//go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd +//go:build !linux && !darwin && !freebsd && !windows +// +build !linux,!darwin,!freebsd,!windows package homedir diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 3ac9ff6994..45be87659e 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -118,10 +118,13 @@ func GetConfigHome() (string, error) { tmpDir := filepath.Join(resolvedHome, ".config") _ = os.MkdirAll(tmpDir, 0o700) st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + if err != nil { + rootlessConfigHomeDirError = err + return + } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() { cfgHomeDir = tmpDir } else { - rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir) + rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir) return } } diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index af65f2c03d..a76610f90e 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -5,6 +5,7 @@ package homedir import ( "os" + "path/filepath" ) // Key returns the env var name for the user's home dir based on @@ -25,8 +26,36 @@ func Get() string { return home } +// GetConfigHome returns the 
home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func GetConfigHome() (string, error) { + return filepath.Join(Get(), ".config"), nil +} + // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { return "%USERPROFILE%" // be careful while using in format functions } + +// StickRuntimeDirContents is a no-op on Windows +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, nil +} + +// GetRuntimeDir returns a directory suitable to store runtime files. +// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable +// directory for the current user. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + data, err := GetDataHome() + if err != nil { + return "", err + } + runtimeDir := filepath.Join(data, "containers", "storage") + return runtimeDir, nil +} diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 49a4ff1118..3f431726e2 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -397,6 +397,18 @@ type Store interface { // allow ImagesByDigest to find images by their correct digests. SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + // ImageDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the image is + // deleted. + ImageDirectory(id string) (string, error) + + // ImageRunDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the host system + // is restarted. + ImageRunDirectory(id string) (string, error) + // ListLayerBigData retrieves a list of the (possibly large) chunks of // named data associated with a layer. 
ListLayerBigData(id string) ([]string, error) @@ -3311,6 +3323,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { return nil, ErrContainerUnknown } +func (s *store) ImageDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + middleDir := s.graphDriverName + "-images" + gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gipath, 0o700); err != nil { + return "", true, err + } + return gipath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3328,6 +3361,28 @@ func (s *store) ContainerDirectory(id string) (string, error) { return res, err } +func (s *store) ImageRunDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + + middleDir := s.graphDriverName + "-images" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerRunDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index 678153cf88..7ca5ab7222 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -16,11 +16,9 @@ func isValidCredsMessage(msg string) error { if credentials.IsCredentialsMissingServerURLMessage(msg) { return credentials.NewErrCredentialsMissingServerURL() } - if credentials.IsCredentialsMissingUsernameMessage(msg) { return credentials.NewErrCredentialsMissingUsername() } - return nil } @@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } return nil @@ -55,17 +50,15 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { + if credentials.IsErrCredentialsNotFoundMessage(string(out)) { return nil, 
credentials.NewErrCredentialsNotFound() } - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } resp := &credentials.Credentials{ diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go index 8fa4d5d254..2283d5a44c 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -1,6 +1,9 @@ package credentials -import "errors" +import ( + "errors" + "strings" +) const ( // ErrCredentialsNotFound standardizes the not found error, so every helper returns @@ -47,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool { // This function helps to check messages returned by an // external program via its standard output. func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage + return strings.TrimSpace(err) == errCredentialsNotFoundMessage } // errCredentialsMissingServerURL represents an error raised @@ -104,7 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool { // IsCredentialsMissingServerURLMessage checks for an // errCredentialsMissingServerURL in the error message. func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage + return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage } // IsCredentialsMissingUsername returns true if the error @@ -117,5 +120,5 @@ func IsCredentialsMissingUsername(err error) bool { // IsCredentialsMissingUsernameMessage checks for an // errCredentialsMissingUsername in the error message. func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage + return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage } diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml deleted file mode 100644 index bfc421200d..0000000000 --- a/vendor/github.com/felixge/httpsnoop/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile index 2d84889aed..4e12afdd90 100644 --- a/vendor/github.com/felixge/httpsnoop/Makefile +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -1,7 +1,7 @@ .PHONY: ci generate clean ci: clean generate - go test -v ./... + go test -race -v ./... generate: go generate . diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md index ddcecd13e7..cf6b42f3d7 100644 --- a/vendor/github.com/felixge/httpsnoop/README.md +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -7,8 +7,8 @@ http.Handlers. Doing this requires non-trivial wrapping of the http.ResponseWriter interface, which is also exposed for users interested in a more low-level API. 
-[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) -[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) ## Usage Example diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go index b77cc7c009..bec7b71b39 100644 --- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri return func(code int) { next(code) - if !headerWritten { + if !(code >= 100 && code <= 199) && !headerWritten { m.Code = code headerWritten = true } diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go index 31cbdfb8ef..101cedde67 100644 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -1,5 +1,5 @@ // +build go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. package httpsnoop diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go index ab99c07c7a..e0951df152 100644 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -1,5 +1,5 @@ // +build !go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. package httpsnoop diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index a89a4de3f3..2813714060 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/asaskevich/govalidator" + "github.com/google/uuid" "go.mongodb.org/mongo-driver/bson" ) @@ -57,24 +58,35 @@ const ( // - long top-level domain names (e.g. example.london) are permitted // - symbol unicode points are permitted (e.g. emoji) (not for top-level domain) HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$` - // UUIDPattern Regex for UUID that allows uppercase - UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$` - // UUID3Pattern Regex for UUID3 that allows uppercase - UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$` - // UUID4Pattern Regex for UUID4 that allows uppercase - UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` - // UUID5Pattern Regex for UUID5 that allows uppercase - UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` + // json null type jsonNull = "null" ) +const ( + // UUIDPattern Regex for UUID that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+ UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + + // UUID3Pattern Regex for UUID3 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + + // UUID4Pattern Regex for UUID4 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + + // UUID5Pattern Regex for UUID5 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` +) + var ( rxHostname = regexp.MustCompile(HostnamePattern) - rxUUID = regexp.MustCompile(UUIDPattern) - rxUUID3 = regexp.MustCompile(UUID3Pattern) - rxUUID4 = regexp.MustCompile(UUID4Pattern) - rxUUID5 = regexp.MustCompile(UUID5Pattern) ) // IsHostname returns true when the string is a valid hostname @@ -99,24 +111,28 @@ func IsHostname(str string) bool { return valid } -// IsUUID returns true is the string matches a UUID, upper case is allowed +// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed func IsUUID(str string) bool { - return rxUUID.MatchString(str) + _, err := uuid.Parse(str) + return err == nil } -// IsUUID3 returns true is the string matches a UUID, upper case is allowed +// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed func IsUUID3(str string) bool { - return rxUUID3.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(3) } -// IsUUID4 returns true is the string matches a UUID, upper case is allowed +// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed func IsUUID4(str string) bool { - return rxUUID4.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(4) } -// IsUUID5 returns true is the string matches a UUID, upper case is allowed +// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed func IsUUID5(str string) bool { - return rxUUID5.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(5) } // IsEmail validates an email address. 
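Note on the go-openapi/strfmt hunk above: UUID validation now delegates to github.com/google/uuid rather than the package-level regexes, so a string is accepted if it parses as an RFC 4122 UUID and the version check is taken from the parsed value; the old `*Pattern` constants remain only as deprecated compatibility shims. A minimal standalone sketch of that behavior (not part of the patch; the helper name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// isUUID4 sketches the updated strfmt logic: parse first, then compare the
// version encoded in the UUID instead of matching a regular expression.
func isUUID4(s string) bool {
	id, err := uuid.Parse(s)
	return err == nil && id.Version() == uuid.Version(4)
}

func main() {
	fmt.Println(isUUID4("9e754ef6-8dd9-4903-af43-7aea99bfb1fe")) // true: well-formed v4
	fmt.Println(isUUID4("9e754ef6-8dd9-3903-af43-7aea99bfb1fe")) // false: version nibble is 3
	fmt.Println(isUUID4("not-a-uuid"))                           // false: does not parse
}
```

The same parse-then-compare pattern covers IsUUID3 and IsUUID5 in the vendored change, and plain IsUUID accepts any version that uuid.Parse understands.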
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 0000000000..e7f28ed6b7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184d1..2b2e463107 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. 
+ commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. 
type indexOfInitialisms struct { @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb8..8bb64ac32f 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string - } - - casualNameLexem struct { - original string + kind lexemKind } ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7dc..274727a866 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, 
upper-cased version + postSplitInitialismCheck bool } - splitterOption func(*splitter) *splitter + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch ) -// split calls the splitter; splitter provides more control and post options -func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. - for _, lexem := range lexems { + matchesPool struct { + *sync.Pool + } + + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } +) + +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options +func split(str string) []string { + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) + + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter -} - -// withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { - s.postSplitInitialismCheck = true return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool - } - initialismMatches []*initialismMatch -) +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) { + s.postSplitInitialismCheck = true +} -func (s *splitter) toNameLexems(name string) []nameLexem { +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) + } + + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } - - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } - - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue } - match.complete = true - match.end = currentRunePosition - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - newMatches = append(newMatches, match) + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + *newMatches = append(*newMatches, match) + } } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + s.appendBrokenDownCasualString(nameLexems, rest) } - if lastAcceptedMatch.end+1 != len(nameRunes) { - rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) - } + poolOfMatches.RedeemMatches(matches) return nameLexems } -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b -} - -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if 
currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) } - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - - return segments +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } + + i += size + } + + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 0000000000..c52d6bf719 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,22 @@ +package swag + +import "unsafe" + +type internalString struct { + Data unsafe.Pointer + Len int +} + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + p := (*internalString)(unsafe.Pointer(&str)).Data + return unsafe.Slice((*byte)(p), len(str)) +} + +/* + * go1.20 version (for when go mod moves to a go1.20 requirement): + +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} +*/ diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index c5322d133d..5051401c49 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. 
-var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // 
ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems - result := "" - for _, lexem := range lexems { + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } + + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - 
// commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index 3b8c1e3a62..a8c4e359ea 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -18,6 +18,8 @@ import ( "encoding/json" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -245,7 +247,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +287,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,9 +347,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - - return nil, nil //nolint:nilnil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583c0..c918f11d8b 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,18 @@ This package provides various compression algorithms. # changelog +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -554,7 +566,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. 
S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index de912e187c..66d1657d2c 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { + if d.level <= 0 && d.level > -MinCustomWindowSize { return } if d.fast != nil { diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853fca..5a4412f907 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a240970d..6a5a2988b6 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf81921c..20671dcb91 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. 
+// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed454..667ca06794 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe44..8adfebb029 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b99725f..5b06174b89 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ 
sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ 
(BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/modules.txt b/vendor/modules.txt index 3bcb401de1..5b46919c3f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -167,7 +167,7 @@ github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/util github.com/containers/buildah/pkg/volumes github.com/containers/buildah/util -# github.com/containers/common v0.57.1-0.20240124083822-167512e3cfc4 +# github.com/containers/common v0.57.1-0.20240129201029-3310a75e3608 ## explicit; go 1.20 github.com/containers/common/internal/attributedstring github.com/containers/common/libimage @@ -236,7 +236,7 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.7.2 ## explicit; go 1.20 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28 +# github.com/containers/image/v5 v5.29.2-0.20240129204525-816800b5daf7 ## explicit; go 1.19 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -346,8 +346,8 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.51.1-0.20231221151421-1020ab61b4e5 -## explicit; go 1.19 +# github.com/containers/storage v1.52.1-0.20240129173630-7a525ce0e2bc +## explicit; go 1.20 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -374,6 +374,7 @@ github.com/containers/storage/pkg/directory github.com/containers/storage/pkg/dmesg github.com/containers/storage/pkg/fileutils github.com/containers/storage/pkg/fsutils +github.com/containers/storage/pkg/fsverity github.com/containers/storage/pkg/homedir github.com/containers/storage/pkg/idmap github.com/containers/storage/pkg/idtools @@ -425,7 +426,7 @@ github.com/crc-org/vfkit/pkg/config github.com/crc-org/vfkit/pkg/rest github.com/crc-org/vfkit/pkg/rest/define github.com/crc-org/vfkit/pkg/util -# github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 +# github.com/cyberphone/json-canonicalization 
v0.0.0-20231217050601-ba74d44ecf5f ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer # github.com/cyphar/filepath-securejoin v0.2.4 @@ -495,7 +496,7 @@ github.com/docker/docker/pkg/progress github.com/docker/docker/pkg/stdcopy github.com/docker/docker/pkg/streamformatter github.com/docker/docker/pkg/system -# github.com/docker/docker-credential-helpers v0.8.0 +# github.com/docker/docker-credential-helpers v0.8.1 ## explicit; go 1.19 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials @@ -511,7 +512,7 @@ github.com/docker/go-plugins-helpers/volume # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/felixge/httpsnoop v1.0.3 +# github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.7.0 @@ -588,10 +589,10 @@ github.com/go-openapi/runtime/yamlpc # github.com/go-openapi/spec v0.20.9 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.21.10 +# github.com/go-openapi/strfmt v0.22.0 ## explicit; go 1.19 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.5 +# github.com/go-openapi/swag v0.22.9 ## explicit; go 1.19 github.com/go-openapi/swag # github.com/go-openapi/validate v0.22.1 @@ -708,7 +709,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.4 +# github.com/klauspost/compress v1.17.5 ## explicit; go 1.19 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -983,7 +984,7 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey github.com/sigstore/rekor/pkg/generated/client/tlog github.com/sigstore/rekor/pkg/generated/models github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.0 +# github.com/sigstore/sigstore v1.8.1 ## explicit; go 1.20 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/oauth @@ -1162,7 +1163,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts golang.org/x/crypto/twofish golang.org/x/crypto/xts -# golang.org/x/exp v0.0.0-20231006140011-7918f672742d +# golang.org/x/exp v0.0.0-20231226003508-02704c960a9b ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1187,7 +1188,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.15.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal