Vendor in latest containers/(common,image,storage)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
go.mod (14 lines changed)

@@ -12,12 +12,12 @@ require (
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.1.1
 	github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6
-	github.com/containers/common v0.50.2-0.20221109162103-1e40f47dd90b
+	github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.23.1-0.20221101011818-2f770d6d5a0c
+	github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
 	github.com/containers/ocicrypt v1.1.6
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0
+	github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3
@@ -41,7 +41,7 @@ require (
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/nxadm/tail v1.4.8
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.24.0
+	github.com/onsi/gomega v1.24.1
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2
 	github.com/opencontainers/runc v1.1.4
@@ -121,7 +121,7 @@ require (
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
 	github.com/sigstore/sigstore v1.4.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.8.1 // indirect
+	github.com/sylabs/sif/v2 v2.8.3 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -132,9 +132,9 @@ require (
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/crypto v0.1.0 // indirect
+	golang.org/x/crypto v0.2.0 // indirect
 	golang.org/x/mod v0.6.0 // indirect
-	golang.org/x/net v0.1.0 // indirect
+	golang.org/x/net v0.2.0 // indirect
 	golang.org/x/tools v0.2.0 // indirect
 	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
 	google.golang.org/grpc v1.50.1 // indirect
go.sum (28 lines changed)

@@ -264,12 +264,12 @@ github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNG
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
 github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6 h1:6bFoF3QIUzza8NWAsHS1ZGDDEr+r5do46dXEbzkZb3Y=
 github.com/containers/buildah v1.28.1-0.20221029151733-c2cf9fa47ab6/go.mod h1:skMuWv4FIebpsAFT7fBv2Ll0e0w2j71IUWCIrw9iTV0=
-github.com/containers/common v0.50.2-0.20221109162103-1e40f47dd90b h1:Hnd2R1izztqrDJsyFSKvbzXW3jWxLyqjEmcCubeOIn0=
-github.com/containers/common v0.50.2-0.20221109162103-1e40f47dd90b/go.mod h1:Nbv796IlIsJ6h8zFhNAn2hTXzVUICfiKS8vi3og41oA=
+github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1 h1:AmN1j+GzK4+fmtOljYVbxAEJeXKkPs3ofB/uxJt4SCU=
+github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1/go.mod h1:VBycGm+y123zhrbvGu5GykZiYJbtSqm7kN2tXCu2INM=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.23.1-0.20221101011818-2f770d6d5a0c h1:Jm6GiccEre7O+KohztmIBlduJdYPvzEhhjUuUczZivQ=
-github.com/containers/image/v5 v5.23.1-0.20221101011818-2f770d6d5a0c/go.mod h1:PWNjca543qp7QK9Pbp0Qsg/cuoCjeREZmIvOx/hXEjI=
+github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77 h1:zLn8X9uD1jgjC7mTK/SwS1tmXTMLzfw1Lbc0Rn+6rFY=
+github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77/go.mod h1:T17ZmftW9GT2/gOv6b35kGqxB/caOnTn2k3dyh3VH34=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -281,8 +281,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0 h1:j/+RfR57O5MEr0Irhju/5phfwoU7s3pR0Q5T1dEPeVw=
-github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
+github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8 h1:MrQjgoKVQpD/16sfYe9C3T3y2gLvfBPADMFQ7Oq93zo=
+github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -739,8 +739,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
-github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -910,8 +910,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.8.1 h1:whr4Vz12RXfLnYyVGHoD/rD/hbF2g9OW7BJHa+WIqW8=
-github.com/sylabs/sif/v2 v2.8.1/go.mod h1:LQOdYXC9a8i7BleTKRw9lohi0rTbXkJOeS9u0ebvgyM=
+github.com/sylabs/sif/v2 v2.8.3 h1:m5MwWhGOSdf+vuz/Riehbm2L3mcVn0716YduLicFbIE=
+github.com/sylabs/sif/v2 v2.8.3/go.mod h1:ZK1fytsX0alye/c4p7HEzVcsaKLitheoZgiIH/vCMTc=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -1017,8 +1017,8 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
+golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1109,8 +1109,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
vendor/github.com/containers/common/libnetwork/netavark/config.go (5 lines changed; generated, vendored)

@@ -116,6 +116,11 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
            }
            // rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it
            newNetwork.Options[types.IsolateOption] = strconv.FormatBool(val)
+       case types.MetricOption:
+           _, err := strconv.ParseUint(value, 10, 32)
+           if err != nil {
+               return nil, err
+           }
        default:
            return nil, fmt.Errorf("unsupported bridge network option %s", key)
        }
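The added case above only checks that the "metric" value parses as an unsigned 32-bit integer; the existing "isolate" handling normalizes Go-style booleans ("1"/"0") to the "true"/"false" strings netavark expects. A minimal standalone sketch of that option-validation shape, not part of the vendored code (the helper name is illustrative):

package main

import (
	"fmt"
	"strconv"
)

// validateBridgeOption mirrors the kind of checks the vendored hunk performs:
// booleans are normalized for the Rust side, and "metric" must be a uint32.
func validateBridgeOption(key, value string) (string, error) {
	switch key {
	case "isolate":
		val, err := strconv.ParseBool(value) // accepts "1", "0", "true", "false", ...
		if err != nil {
			return "", err
		}
		return strconv.FormatBool(val), nil // netavark only understands "true"/"false"
	case "metric":
		if _, err := strconv.ParseUint(value, 10, 32); err != nil {
			return "", err
		}
		return value, nil
	default:
		return "", fmt.Errorf("unsupported bridge network option %s", key)
	}
}

func main() {
	fmt.Println(validateBridgeOption("isolate", "1"))  // "true" <nil>
	fmt.Println(validateBridgeOption("metric", "100")) // "100" <nil>
}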
vendor/github.com/containers/common/libnetwork/types/const.go (1 line changed; generated, vendored)

@@ -40,6 +40,7 @@ const (
 	MTUOption     = "mtu"
 	ModeOption    = "mode"
 	IsolateOption = "isolate"
+	MetricOption  = "metric"
 )
 
 type NetworkBackend string
vendor/github.com/containers/common/pkg/config/config.go (13 lines changed; generated, vendored)

@@ -358,6 +358,9 @@ type EngineConfig struct {
 	// OCIRuntimes are the set of configured OCI runtimes (default is runc).
 	OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
 
+	// PlatformToOCIRuntime requests specific OCI runtime for a specified platform of image.
+	PlatformToOCIRuntime map[string]string `toml:"platform_to_oci_runtime,omitempty"`
+
 	// PodExitPolicy determines the behaviour when the last container of a pod exits.
 	PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"`
 
@@ -619,6 +622,16 @@ type Destination struct {
 	IsMachine bool `toml:"is_machine,omitempty"`
 }
 
+// Consumes container image's os and arch and returns if any dedicated runtime was
+// configured otherwise returns default runtime.
+func (c *EngineConfig) ImagePlatformToRuntime(os string, arch string) string {
+	platformString := os + "/" + arch
+	if val, ok := c.PlatformToOCIRuntime[platformString]; ok {
+		return val
+	}
+	return c.OCIRuntime
+}
+
 // NewConfig creates a new Config. It starts with an empty config and, if
 // specified, merges the config at `userConfigPath` path. Depending if we're
 // running as root or rootless, we then merge the system configuration followed
vendor/github.com/containers/common/pkg/config/config_darwin.go (4 lines changed; generated, vendored)

@@ -10,6 +10,10 @@ const (
 
 	// DefaultContainersConfig holds the default containers config path
 	DefaultContainersConfig = "/usr/share/" + _configPath
+
+	// DefaultSignaturePolicyPath is the default value for the
+	// policy.json file.
+	DefaultSignaturePolicyPath = "/etc/containers/policy.json"
 )
 
 // podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations.
vendor/github.com/containers/common/pkg/config/config_freebsd.go (4 lines changed; generated, vendored)

@@ -10,6 +10,10 @@ const (
 
 	// DefaultContainersConfig holds the default containers config path
 	DefaultContainersConfig = "/usr/local/share/" + _configPath
+
+	// DefaultSignaturePolicyPath is the default value for the
+	// policy.json file.
+	DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json"
 )
 
 // podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations.
vendor/github.com/containers/common/pkg/config/config_linux.go (4 lines changed; generated, vendored)

@@ -13,6 +13,10 @@ const (
 
 	// DefaultContainersConfig holds the default containers config path
 	DefaultContainersConfig = "/usr/share/" + _configPath
+
+	// DefaultSignaturePolicyPath is the default value for the
+	// policy.json file.
+	DefaultSignaturePolicyPath = "/etc/containers/policy.json"
 )
 
 func selinuxEnabled() bool {
vendor/github.com/containers/common/pkg/config/config_windows.go (4 lines changed; generated, vendored)

@@ -8,6 +8,10 @@ const (
 
 	// DefaultContainersConfig holds the default containers config path
 	DefaultContainersConfig = "/usr/share/" + _configPath
+
+	// DefaultSignaturePolicyPath is the default value for the
+	// policy.json file.
+	DefaultSignaturePolicyPath = "/etc/containers/policy.json"
 )
 
 // podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.
vendor/github.com/containers/common/pkg/config/containers.conf (5 lines changed; generated, vendored)

@@ -263,6 +263,11 @@ default_sysctls = [
 # If it is empty or commented out, no volumes will be added
 #
 #volumes = []
+#
+#[engine.platform_to_oci_runtime]
+#"wasi/wasm" = ["crun-wasm"]
+#"wasi/wasm32" = ["crun-wasm"]
+#"wasi/wasm64" = ["crun-wasm"]
 
 [secrets]
 #driver = "file"
vendor/github.com/containers/common/pkg/config/default.go (17 lines changed; generated, vendored)

@@ -149,9 +149,6 @@ const (
 	DefaultPidsLimit = 2048
 	// DefaultPullPolicy pulls the image if it does not exist locally.
 	DefaultPullPolicy = "missing"
-	// DefaultSignaturePolicyPath is the default value for the
-	// policy.json file.
-	DefaultSignaturePolicyPath = "/etc/containers/policy.json"
 	// DefaultSubnet is the subnet that will be used for the default
 	// network.
 	DefaultSubnet = "10.88.0.0/16"
@@ -332,6 +329,15 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 			"/bin/crun",
 			"/run/current-system/sw/bin/crun",
 		},
+		"crun-wasm": {
+			"/usr/bin/crun-wasm",
+			"/usr/sbin/crun-wasm",
+			"/usr/local/bin/crun-wasm",
+			"/usr/local/sbin/crun-wasm",
+			"/sbin/crun-wasm",
+			"/bin/crun-wasm",
+			"/run/current-system/sw/bin/crun-wasm",
+		},
 		"runc": {
 			"/usr/bin/runc",
 			"/usr/sbin/runc",
@@ -378,6 +384,11 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 			"/usr/local/bin/ocijail",
 		},
 	}
+	c.PlatformToOCIRuntime = map[string]string{
+		"wasi/wasm":   "crun-wasm",
+		"wasi/wasm32": "crun-wasm",
+		"wasi/wasm64": "crun-wasm",
+	}
 	// Needs to be called after populating c.OCIRuntimes.
 	c.OCIRuntime = c.findRuntime()
 
vendor/github.com/containers/storage/VERSION (2 lines changed; generated, vendored)

@@ -1 +1 @@
-1.43.1-dev
+1.44.1-dev
vendor/github.com/containers/storage/containers.go (75 lines changed; generated, vendored)

@@ -191,7 +191,7 @@ func (r *containerStore) startWritingWithReload(canReload bool) error {
    }()
 
    if canReload {
-       if err := r.reloadIfChanged(true); err != nil {
+       if _, err := r.reloadIfChanged(true); err != nil {
            return err
        }
    }
@@ -215,18 +215,41 @@ func (r *containerStore) stopWriting() {
 // If this succeeds, the caller MUST call stopReading().
 func (r *containerStore) startReading() error {
    r.lockfile.RLock()
-   succeeded := false
+   unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
    defer func() {
-       if !succeeded {
-           r.lockfile.Unlock()
+       if unlockFn != nil {
+           unlockFn()
        }
    }()
 
-   if err := r.reloadIfChanged(false); err != nil {
-       return err
+   if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
+       if !tryLockedForWriting {
+           return err
+       }
+       unlockFn()
+       unlockFn = nil
+
+       r.lockfile.Lock()
+       unlockFn = r.lockfile.Unlock
+       if _, err := r.load(true); err != nil {
+           return err
+       }
+       unlockFn()
+       unlockFn = nil
+
+       r.lockfile.RLock()
+       unlockFn = r.lockfile.Unlock
+       // We need to check for a reload reload once more because the on-disk state could have been modified
+       // after we released the lock.
+       // If that, _again_, finds inconsistent state, just give up.
+       // We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
+       // shouldn’t be saved (by correct implementations) in the first place.
+       if _, err := r.reloadIfChanged(false); err != nil {
+           return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+       }
    }
 
-   succeeded = true
+   unlockFn = nil
    return nil
 }
 
@@ -239,15 +262,23 @@ func (r *containerStore) stopReading() {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *containerStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// load() with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
    r.loadMut.Lock()
    defer r.loadMut.Unlock()
 
    modified, err := r.lockfile.Modified()
-   if err == nil && modified {
+   if err != nil {
+       return false, err
+   }
+   if modified {
        return r.load(lockedForWriting)
    }
-   return err
+   return false, nil
 }
 
 func (r *containerStore) Containers() ([]Container, error) {
@@ -274,24 +305,27 @@ func (r *containerStore) datapath(id, key string) string {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *containerStore) load(lockedForWriting bool) error {
-   needSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *containerStore) load(lockedForWriting bool) (bool, error) {
    rpath := r.containerspath()
    data, err := os.ReadFile(rpath)
    if err != nil && !os.IsNotExist(err) {
-       return err
+       return false, err
    }
 
    containers := []*Container{}
    if len(data) != 0 {
        if err := json.Unmarshal(data, &containers); err != nil {
-           return fmt.Errorf("loading %q: %w", rpath, err)
+           return false, fmt.Errorf("loading %q: %w", rpath, err)
        }
    }
    idlist := make([]string, 0, len(containers))
    layers := make(map[string]*Container)
    ids := make(map[string]*Container)
    names := make(map[string]*Container)
+   var errorToResolveBySaving error // == nil
    for n, container := range containers {
        idlist = append(idlist, container.ID)
        ids[container.ID] = containers[n]
@@ -299,7 +333,7 @@ func (r *containerStore) load(lockedForWriting bool) error {
        for _, name := range container.Names {
            if conflict, ok := names[name]; ok {
                r.removeName(conflict, name)
-               needSave = true
+               errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
            }
            names[name] = containers[n]
        }
@@ -310,14 +344,13 @@
    r.byid = ids
    r.bylayer = layers
    r.byname = names
-   if needSave {
+   if errorToResolveBySaving != nil {
        if !lockedForWriting {
-           // Eventually, the callers should be modified to retry with a write lock, instead.
-           return errors.New("container store is inconsistent and the current caller does not hold a write lock")
+           return true, errorToResolveBySaving
        }
-       return r.Save()
+       return false, r.Save()
    }
-   return nil
+   return false, nil
 }
 
 // Save saves the contents of the store to disk. It should be called with
@@ -358,7 +391,7 @@ func newContainerStore(dir string) (rwContainerStore, error) {
        return nil, err
    }
    defer cstore.stopWriting()
-   if err := cstore.load(true); err != nil {
+   if _, err := cstore.load(true); err != nil {
        return nil, err
    }
    return &cstore, nil
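The rewritten startReading above follows a "read, and if the on-disk state needs repair, drop the read lock, repair under a write lock, then drop back to reading" pattern. A simplified, self-contained sketch of the same idea; the store type and its reload/repair functions here are illustrative, not the vendored API:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu       sync.RWMutex
	damaged  bool // stands in for "inconsistent state found on reload"
	contents string
}

// reload returns (tryLockedForWriting, err): on failure, the bool says whether
// retrying the repair under a write lock could help.
func (s *store) reload() (bool, error) {
	if s.damaged {
		return true, fmt.Errorf("store is inconsistent")
	}
	return false, nil
}

func (s *store) repair() { s.damaged = false } // must hold the write lock

// startReading mirrors the locking dance in the vendored code.
func (s *store) startReading() error {
	s.mu.RLock()
	if tryWrite, err := s.reload(); err != nil {
		if !tryWrite {
			s.mu.RUnlock()
			return err
		}
		// RWMutex cannot be upgraded in place: drop the read lock, repair
		// under the write lock, then re-acquire the read lock.
		s.mu.RUnlock()
		s.mu.Lock()
		s.repair()
		s.mu.Unlock()
		s.mu.RLock()
		if _, err := s.reload(); err != nil {
			s.mu.RUnlock()
			return fmt.Errorf("even after repairing once: %w", err)
		}
	}
	return nil // caller must eventually call s.mu.RUnlock()
}

func main() {
	s := &store{damaged: true, contents: "layers"}
	if err := s.startReading(); err != nil {
		panic(err)
	}
	defer s.mu.RUnlock()
	fmt.Println("reading:", s.contents)
}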
vendor/github.com/containers/storage/drivers/quota/projectquota.go (56 lines changed; generated, vendored)

@@ -51,6 +51,7 @@ struct fsxattr {
 */
 import "C"
 import (
+   "errors"
    "fmt"
    "math"
    "os"
@@ -78,6 +79,7 @@ type Control struct {
    backingFsBlockDev string
    nextProjectID     uint32
    quotas            map[string]uint32
+   basePath          string
 }
 
 // Attempt to generate a unigue projectid. Multiple directories
@@ -158,20 +160,22 @@ func NewControl(basePath string) (*Control, error) {
        Size:   0,
        Inodes: 0,
    }
-   if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
-       return nil, err
-   }
 
    q := Control{
        backingFsBlockDev: backingFsBlockDev,
        nextProjectID:     minProjectID + 1,
        quotas:            make(map[string]uint32),
+       basePath:          basePath,
    }
 
+   if err := q.setProjectQuota(minProjectID, quota); err != nil {
+       return nil, err
+   }
+
    //
    // get first project id to be used for next container
    //
-   err = q.findNextProjectID(basePath)
+   err = q.findNextProjectID()
    if err != nil {
        return nil, err
    }
@@ -204,11 +208,11 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
    // set the quota limit for the container's project id
    //
    logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
-   return setProjectQuota(q.backingFsBlockDev, projectID, quota)
+   return q.setProjectQuota(projectID, quota)
 }
 
 // setProjectQuota - set the quota for project id on xfs block device
-func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
+func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
    var d C.fs_disk_quota_t
    d.d_version = C.FS_DQUOT_VERSION
    d.d_id = C.__u32(projectID)
@@ -225,15 +229,35 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
        d.d_ino_softlimit = d.d_ino_hardlimit
    }
 
-   var cs = C.CString(backingFsBlockDev)
+   var cs = C.CString(q.backingFsBlockDev)
    defer C.free(unsafe.Pointer(cs))
 
-   _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
-       uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
-       uintptr(unsafe.Pointer(&d)), 0, 0)
-   if errno != 0 {
+   runQuotactl := func() syscall.Errno {
+       _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
+           uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
+           uintptr(unsafe.Pointer(&d)), 0, 0)
+       return errno
+   }
+
+   errno := runQuotactl()
+
+   // If the backingFsBlockDev does not exist any more then try to recreate it.
+   if errors.Is(errno, unix.ENOENT) {
+       if _, err := makeBackingFsDev(q.basePath); err != nil {
+           return fmt.Errorf(
+               "failed to recreate missing backingFsBlockDev %s for projid %d: %w",
+               q.backingFsBlockDev, projectID, err,
+           )
+       }
+
+       if errno := runQuotactl(); errno != 0 {
+           return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
+               projectID, q.backingFsBlockDev, errno)
+       }
+
+   } else if errno != 0 {
        return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
-           projectID, backingFsBlockDev, errno)
+           projectID, q.backingFsBlockDev, errno)
    }
 
    return nil
@@ -332,16 +356,16 @@ func setProjectID(targetPath string, projectID uint32) error {
 
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
-func (q *Control) findNextProjectID(home string) error {
-   files, err := os.ReadDir(home)
+func (q *Control) findNextProjectID() error {
+   files, err := os.ReadDir(q.basePath)
    if err != nil {
-       return fmt.Errorf("read directory failed : %s", home)
+       return fmt.Errorf("read directory failed : %s", q.basePath)
    }
    for _, file := range files {
        if !file.IsDir() {
            continue
        }
-       path := filepath.Join(home, file.Name())
+       path := filepath.Join(q.basePath, file.Name())
        projid, err := getProjectID(path)
        if err != nil {
            return err
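The quota change wraps the quotactl call in a closure so it can be retried once after the backing block device is recreated. A hedged sketch of that "call, recreate on ENOENT, call again" shape, with the syscall replaced by a stub so the example runs anywhere (the function names are illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
)

var errMissingDev = os.ErrNotExist // stands in for unix.ENOENT from quotactl

// setQuota shows the retry structure: run once, and if the backing device is
// gone, recreate it and run again. Device handling is stubbed out.
func setQuota(dev *string, projectID uint32, recreate func() (string, error)) error {
	run := func() error {
		if *dev == "" {
			return errMissingDev // pretend quotactl failed with ENOENT
		}
		return nil
	}

	err := run()
	if errors.Is(err, errMissingDev) {
		newDev, rerr := recreate()
		if rerr != nil {
			return fmt.Errorf("recreating backing device for projid %d: %w", projectID, rerr)
		}
		*dev = newDev
		if err := run(); err != nil {
			return fmt.Errorf("setting quota for projid %d after recreation: %w", projectID, err)
		}
		return nil
	}
	return err
}

func main() {
	dev := "" // simulate a deleted backingFsBlockDev
	err := setQuota(&dev, 101, func() (string, error) {
		return "/var/lib/storage/backingFsBlockDev", nil // hypothetical path
	})
	fmt.Println(err, dev)
}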
vendor/github.com/containers/storage/images.go (86 lines changed; generated, vendored)

@@ -208,7 +208,7 @@ func (r *imageStore) startWritingWithReload(canReload bool) error {
    }()
 
    if canReload {
-       if err := r.reloadIfChanged(true); err != nil {
+       if _, err := r.reloadIfChanged(true); err != nil {
            return err
        }
    }
@@ -235,20 +235,43 @@ func (r *imageStore) stopWriting() {
 // should use startReading() instead.
 func (r *imageStore) startReadingWithReload(canReload bool) error {
    r.lockfile.RLock()
-   succeeded := false
+   unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
    defer func() {
-       if !succeeded {
-           r.lockfile.Unlock()
+       if unlockFn != nil {
+           unlockFn()
        }
    }()
 
    if canReload {
-       if err := r.reloadIfChanged(false); err != nil {
-           return err
+       if tryLockedForWriting, err := r.reloadIfChanged(false); err != nil {
+           if !tryLockedForWriting {
+               return err
+           }
+           unlockFn()
+           unlockFn = nil
+
+           r.lockfile.Lock()
+           unlockFn = r.lockfile.Unlock
+           if _, err := r.load(true); err != nil {
+               return err
+           }
+           unlockFn()
+           unlockFn = nil
+
+           r.lockfile.RLock()
+           unlockFn = r.lockfile.Unlock
+           // We need to check for a reload reload once more because the on-disk state could have been modified
+           // after we released the lock.
+           // If that, _again_, finds inconsistent state, just give up.
+           // We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
+           // shouldn’t be saved (by correct implementations) in the first place.
+           if _, err := r.reloadIfChanged(false); err != nil {
+               return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+           }
        }
    }
 
-   succeeded = true
+   unlockFn = nil
    return nil
 }
 
@@ -267,15 +290,23 @@ func (r *imageStore) stopReading() {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *imageStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
    r.loadMut.Lock()
    defer r.loadMut.Unlock()
 
    modified, err := r.lockfile.Modified()
-   if err == nil && modified {
+   if err != nil {
+       return false, err
+   }
+   if modified {
        return r.load(lockedForWriting)
    }
-   return err
+   return false, nil
 }
 
 func (r *imageStore) Images() ([]Image, error) {
@@ -342,36 +373,39 @@ func (i *Image) recomputeDigests() error {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *imageStore) load(lockedForWriting bool) error {
-   shouldSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *imageStore) load(lockedForWriting bool) (bool, error) {
    rpath := r.imagespath()
    data, err := os.ReadFile(rpath)
    if err != nil && !os.IsNotExist(err) {
-       return err
+       return false, err
    }
 
    images := []*Image{}
    if len(data) != 0 {
        if err := json.Unmarshal(data, &images); err != nil {
-           return fmt.Errorf("loading %q: %w", rpath, err)
+           return false, fmt.Errorf("loading %q: %w", rpath, err)
        }
    }
    idlist := make([]string, 0, len(images))
    ids := make(map[string]*Image)
    names := make(map[string]*Image)
    digests := make(map[digest.Digest][]*Image)
+   var errorToResolveBySaving error // == nil
    for n, image := range images {
        ids[image.ID] = images[n]
        idlist = append(idlist, image.ID)
        for _, name := range image.Names {
            if conflict, ok := names[name]; ok {
                r.removeName(conflict, name)
-               shouldSave = true
+               errorToResolveBySaving = ErrDuplicateImageNames
            }
        }
        // Compute the digest list.
        if err := image.recomputeDigests(); err != nil {
-           return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
+           return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
        }
        for _, name := range image.Names {
            names[name] = image
@@ -383,19 +417,23 @@ func (r *imageStore) load(lockedForWriting bool) error {
        image.ReadOnly = !r.lockfile.IsReadWrite()
    }
 
-   if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
-       // Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
-       return ErrDuplicateImageNames
+   if errorToResolveBySaving != nil {
+       if !r.lockfile.IsReadWrite() {
+           return false, errorToResolveBySaving
+       }
+       if !lockedForWriting {
+           return true, errorToResolveBySaving
+       }
    }
    r.images = images
    r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
    r.byid = ids
    r.byname = names
    r.bydigest = digests
-   if shouldSave {
-       return r.Save()
+   if errorToResolveBySaving != nil {
+       return false, r.Save()
    }
-   return nil
+   return false, nil
 }
 
 // Save saves the contents of the store to disk. It should be called with
@@ -439,7 +477,7 @@ func newImageStore(dir string) (rwImageStore, error) {
        return nil, err
    }
    defer istore.stopWriting()
-   if err := istore.load(true); err != nil {
+   if _, err := istore.load(true); err != nil {
        return nil, err
    }
    return &istore, nil
@@ -462,7 +500,7 @@ func newROImageStore(dir string) (roImageStore, error) {
        return nil, err
    }
    defer istore.stopReading()
-   if err := istore.load(false); err != nil {
+   if _, err := istore.load(false); err != nil {
        return nil, err
    }
    return &istore, nil
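Across these stores, the rewrite replaces a boolean shouldSave/needSave flag with an errorToResolveBySaving value: the inconsistency is recorded while scanning, and only afterwards does the code decide, based on the lock state, whether to save, ask the caller to retry with a write lock, or fail. A minimal sketch of that decision, mirroring the (tryLockedForWriting, err) contract but not the vendored types:

package main

import (
	"errors"
	"fmt"
)

var errDuplicateNames = errors.New("duplicate image names")

// resolve decides what to do with a recorded inconsistency.
func resolve(errorToResolveBySaving error, readWrite, lockedForWriting bool, save func() error) (bool, error) {
	if errorToResolveBySaving != nil {
		if !readWrite {
			return false, errorToResolveBySaving // read-only store: nothing we can do
		}
		if !lockedForWriting {
			return true, errorToResolveBySaving // caller should retry holding the write lock
		}
		return false, save() // we hold the write lock: repair by saving
	}
	return false, nil
}

func main() {
	save := func() error { fmt.Println("saved repaired state"); return nil }

	retry, err := resolve(errDuplicateNames, true, false, save)
	fmt.Println(retry, err) // true duplicate image names -> caller upgrades the lock

	retry, err = resolve(errDuplicateNames, true, true, save)
	fmt.Println(retry, err) // false <nil> after saving
}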
vendor/github.com/containers/storage/layers.go (142 lines changed; generated, vendored)

@@ -36,6 +36,10 @@ import (
 const (
    tarSplitSuffix = ".tar-split.gz"
    incompleteFlag = "incomplete"
+   // maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
+   // in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
+   // until we just give up.
+   maxLayerStoreCleanupIterations = 3
 )
 
 // A Layer is a record of a copy-on-write layer that's stored by the lower
@@ -331,7 +335,7 @@ func (r *layerStore) startWritingWithReload(canReload bool) error {
    }()
 
    if canReload {
-       if err := r.reloadIfChanged(true); err != nil {
+       if _, err := r.reloadIfChanged(true); err != nil {
            return err
        }
    }
@@ -358,20 +362,46 @@ func (r *layerStore) stopWriting() {
 // should use startReading() instead.
 func (r *layerStore) startReadingWithReload(canReload bool) error {
    r.lockfile.RLock()
-   succeeded := false
+   unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
    defer func() {
-       if !succeeded {
-           r.lockfile.Unlock()
+       if unlockFn != nil {
+           unlockFn()
        }
    }()
 
    if canReload {
-       if err := r.reloadIfChanged(false); err != nil {
-           return err
+       cleanupsDone := 0
+       for {
+           tryLockedForWriting, err := r.reloadIfChanged(false)
+           if err == nil {
+               break
+           }
+           if !tryLockedForWriting {
+               return err
+           }
+           if cleanupsDone >= maxLayerStoreCleanupIterations {
+               return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
+           }
+           unlockFn()
+           unlockFn = nil
+
+           r.lockfile.Lock()
+           unlockFn = r.lockfile.Unlock
+           if _, err := r.load(true); err != nil {
+               return err
+           }
+           unlockFn()
+           unlockFn = nil
+
+           r.lockfile.RLock()
+           unlockFn = r.lockfile.Unlock
+           // We need to check for a reload reload again because the on-disk state could have been modified
+           // after we released the lock.
+           cleanupsDone++
        }
    }
 
-   succeeded = true
+   unlockFn = nil
    return nil
 }
 
@@ -424,15 +454,23 @@ func (r *layerStore) Modified() (bool, error) {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *layerStore) reloadIfChanged(lockedForWriting bool) error {
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed. In that case the caller MUST
+// call load(), not reloadIfChanged() (because the “if changed” state will not
+// be detected again).
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
    r.loadMut.Lock()
    defer r.loadMut.Unlock()
 
    modified, err := r.Modified()
-   if err == nil && modified {
+   if err != nil {
+       return false, err
+   }
+   if modified {
        return r.load(lockedForWriting)
    }
-   return err
+   return false, nil
 }
 
 func (r *layerStore) Layers() ([]Layer, error) {
@@ -455,26 +493,28 @@ func (r *layerStore) layerspath() string {
 //
 // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
 // if it is held for writing.
-func (r *layerStore) load(lockedForWriting bool) error {
-   shouldSave := false
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
    rpath := r.layerspath()
    info, err := os.Stat(rpath)
    if err != nil {
        if !os.IsNotExist(err) {
-           return err
+           return false, err
        }
    } else {
        r.layerspathModified = info.ModTime()
    }
    data, err := os.ReadFile(rpath)
    if err != nil && !os.IsNotExist(err) {
-       return err
+       return false, err
    }
 
    layers := []*Layer{}
    if len(data) != 0 {
        if err := json.Unmarshal(data, &layers); err != nil {
-           return fmt.Errorf("loading %q: %w", rpath, err)
+           return false, fmt.Errorf("loading %q: %w", rpath, err)
        }
    }
    idlist := make([]string, 0, len(layers))
@@ -482,6 +522,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
    names := make(map[string]*Layer)
    compressedsums := make(map[digest.Digest][]string)
    uncompressedsums := make(map[digest.Digest][]string)
+   var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
    if r.lockfile.IsReadWrite() {
        selinux.ClearLabels()
    }
@@ -491,7 +532,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
        for _, name := range layer.Names {
            if conflict, ok := names[name]; ok {
                r.removeName(conflict, name)
-               shouldSave = true
+               errorToResolveBySaving = ErrDuplicateLayerNames
            }
            names[name] = layers[n]
        }
@@ -505,11 +546,22 @@ func (r *layerStore) load(lockedForWriting bool) error {
            selinux.ReserveLabel(layer.MountLabel)
        }
        layer.ReadOnly = !r.lockfile.IsReadWrite()
+       // The r.lockfile.IsReadWrite() condition maintains past practice:
+       // Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+       // (OTOH creating child layers on top would probably lead to problems?).
+       // We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+       if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+           errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
+       }
    }
 
-   if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
-       // Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
-       return ErrDuplicateLayerNames
+   if errorToResolveBySaving != nil {
+       if !r.lockfile.IsReadWrite() {
+           return false, errorToResolveBySaving
+       }
+       if !lockedForWriting {
+           return true, errorToResolveBySaving
+       }
    }
    r.layers = layers
    r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
@@ -523,42 +575,42 @@ func (r *layerStore) load(lockedForWriting bool) error {
        r.mountsLockfile.RLock()
        defer r.mountsLockfile.Unlock()
        if err := r.loadMounts(); err != nil {
-           return err
+           return false, err
        }
    }
+   }
 
-   // Last step: as we’re writable, try to remove anything that a previous
+   if errorToResolveBySaving != nil {
+       if !r.lockfile.IsReadWrite() {
+           return false, fmt.Errorf("internal error: layerStore.load has shouldSave but !r.lockfile.IsReadWrite")
+       }
+       // Last step: try to remove anything that a previous
       // user of this storage area marked for deletion but didn't manage to
       // actually delete.
       var incompleteDeletionErrors error // = nil
-      if lockedForWriting {
-          for _, layer := range r.layers {
-              if layer.Flags == nil {
-                  layer.Flags = make(map[string]interface{})
-              }
-              if layerHasIncompleteFlag(layer) {
-                  logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
-                  err = r.deleteInternal(layer.ID)
-                  if err != nil {
-                      // Don't return the error immediately, because deleteInternal does not saveLayers();
-                      // Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
-                      // deleted incomplete layers have their metadata correctly removed.
-                      incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
-                          fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
-                  }
-                  shouldSave = true
+      for _, layer := range r.layers {
+          if layer.Flags == nil {
+              layer.Flags = make(map[string]interface{})
+          }
+          if layerHasIncompleteFlag(layer) {
+              logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+              err = r.deleteInternal(layer.ID)
+              if err != nil {
+                  // Don't return the error immediately, because deleteInternal does not saveLayers();
+                  // Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+                  // deleted incomplete layers have their metadata correctly removed.
+                  incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+                      fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
              }
          }
      }
-      if shouldSave {
-          if err := r.saveLayers(); err != nil {
-              return err
-          }
+      if err := r.saveLayers(); err != nil {
+          return false, err
      }
      if incompleteDeletionErrors != nil {
-          return incompleteDeletionErrors
+          return false, incompleteDeletionErrors
      }
   }
-  return nil
+  return false, nil
 }
 
 func (r *layerStore) loadMounts() error {
@@ -689,7 +741,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
        return nil, err
    }
    defer rlstore.stopWriting()
-   if err := rlstore.load(true); err != nil {
+   if _, err := rlstore.load(true); err != nil {
        return nil, err
    }
    return &rlstore, nil
@@ -714,7 +766,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
        return nil, err
    }
    defer rlstore.stopReading()
-   if err := rlstore.load(false); err != nil {
+   if _, err := rlstore.load(false); err != nil {
        return nil, err
    }
    return &rlstore, nil
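layers.go differs from the other two stores in that its reader retries the cleanup in a loop, bounded by the new maxLayerStoreCleanupIterations constant, because other writers can keep introducing inconsistent state between attempts. A standalone sketch of just the bounded loop; the reload/cleanup functions are placeholders, not the vendored API:

package main

import (
	"errors"
	"fmt"
)

const maxCleanupIterations = 3 // mirrors maxLayerStoreCleanupIterations

// reloadBounded retries as long as reload says a cleanup could help,
// giving up after a fixed number of attempts.
func reloadBounded(reload func() (bool, error), cleanup func()) error {
	cleanupsDone := 0
	for {
		tryLockedForWriting, err := reload()
		if err == nil {
			return nil
		}
		if !tryLockedForWriting {
			return err // cleanup would not help
		}
		if cleanupsDone >= maxCleanupIterations {
			return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err)
		}
		cleanup() // in the real code: upgrade to a write lock and load(true)
		cleanupsDone++
	}
}

func main() {
	broken := 2 // the store looks inconsistent for the first two reloads
	err := reloadBounded(
		func() (bool, error) {
			if broken > 0 {
				return true, errors.New("incomplete layer present")
			}
			return false, nil
		},
		func() { broken-- },
	)
	fmt.Println(err) // <nil>: cleaned up within the allowed attempts
}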
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go (17 lines changed; generated, vendored, new file)

@@ -0,0 +1,17 @@
+package kernel
+
+import "golang.org/x/sys/unix"
+
+// Utsname represents the system name structure.
+// It is passthrough for unix.Utsname in order to make it portable with
+// other platforms where it is not available.
+type Utsname unix.Utsname
+
+func uname() (*unix.Utsname, error) {
+	uts := &unix.Utsname{}
+
+	if err := unix.Uname(uts); err != nil {
+		return nil, err
+	}
+	return uts, nil
+}
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go (9 lines changed; generated, vendored)

@@ -1,13 +1,14 @@
-//go:build freebsd || openbsd
-// +build freebsd openbsd
+//go:build openbsd
+// +build openbsd
 
 package kernel
 
 import (
-	"errors"
+	"fmt"
+	"runtime"
 )
 
 // A stub called by kernel_unix.go .
 func uname() (*Utsname, error) {
-	return nil, errors.New("Kernel version detection is available only on linux")
+	return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
 }
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go (4 lines changed; generated, vendored)

@@ -1,5 +1,5 @@
-//go:build !linux && !solaris
-// +build !linux,!solaris
+//go:build !linux && !solaris && !freebsd
+// +build !linux,!solaris,!freebsd
 
 package kernel
 
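Taken together, the three uname files route FreeBSD to a real unix.Uname call, keep the stub for OpenBSD, and reserve the placeholder Utsname type for everything that is neither Linux, Solaris, nor FreeBSD. A hedged, standalone sketch of the FreeBSD/Linux-style path, calling x/sys/unix directly rather than through the vendored package (the build tag restricts it to platforms where unix.Uname exists):

//go:build freebsd || linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	uts := &unix.Utsname{}
	if err := unix.Uname(uts); err != nil {
		panic(err)
	}
	// Utsname fields are fixed-size byte arrays; real code trims at the first NUL.
	fmt.Printf("release bytes: %v\n", uts.Release[:8])
}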
vendor/github.com/onsi/gomega/CHANGELOG.md (12 lines changed; generated, vendored)

@@ -1,3 +1,15 @@
+## 1.24.1
+
+### Fixes
+- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e]
+- fix small typo (#601) [ea0ebe6]
+
+### Maintenance
+- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
+- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
+- fix label-filter in test.yml [d795db6]
+- stop running flakey tests and rely on external network dependencies in CI [7133290]
+
 ## 1.24.0
 
 ### Features
vendor/github.com/onsi/gomega/gexec/session.go (5 lines changed; generated, vendored)

@@ -121,7 +121,6 @@ To assert that the command has exited it is more convenient to use the Exit matc
 
 When the process exits because it has received a particular signal, the exit code will be 128+signal-value
 (See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)
-
 */
 func (s *Session) ExitCode() int {
 	s.lock.Lock()
@@ -142,9 +141,7 @@ will wait for the command to exit then return the entirety of Out's contents.
 
 Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does.
 */
 func (s *Session) Wait(timeout ...interface{}) *Session {
-	args := []any{s}
-	args = append(args, timeout...)
-	EventuallyWithOffset(1, args...).Should(Exit())
+	EventuallyWithOffset(1, s, timeout...).Should(Exit())
 	return s
 }
 
vendor/github.com/onsi/gomega/gomega_dsl.go (18 lines changed; generated, vendored)

@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const GOMEGA_VERSION = "1.24.0"
+const GOMEGA_VERSION = "1.24.1"
 
 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -368,9 +368,9 @@ is equivalent to
 
 	Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
 */
-func Eventually(args ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.Eventually(args...)
+	return Default.Eventually(actualOrCtx, args...)
 }
 
 // EventuallyWithOffset operates like Eventually but takes an additional
@@ -382,9 +382,9 @@ func Eventually(args ...interface{}) AsyncAssertion {
 // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
 // the same as `Eventually(...).WithOffset(...).WithTimeout` or
 // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.EventuallyWithOffset(offset, args...)
+	return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
 }
 
 /*
@@ -402,9 +402,9 @@ Consistently is useful in cases where you want to assert that something *does no
 
 This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
 */
-func Consistently(args ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.Consistently(args...)
+	return Default.Consistently(actualOrCtx, args...)
 }
 
 // ConsistentlyWithOffset operates like Consistently but takes an additional
@@ -413,9 +413,9 @@ func Consistently(args ...interface{}) AsyncAssertion {
 //
 // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
 // optional `WithTimeout` and `WithPolling`.
-func ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
 	ensureDefaultGomegaIsConfigured()
-	return Default.ConsistentlyWithOffset(offset, args...)
+	return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
 }
 
 /*
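Gomega 1.24.1 makes the first argument to Eventually/Consistently an explicit actualOrCtx parameter, which restores the pre-1.24 call shapes while still letting a context be passed first. A usage sketch of both shapes, assuming a standard Go test that depends on gomega (the polled values are made up for illustration):

package example_test

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyShapes(t *testing.T) {
	g := NewWithT(t)

	counter := 0
	poll := func() int { counter++; return counter }

	// Classic shape: the actual value (here, a poller func) comes first.
	g.Eventually(poll).WithTimeout(time.Second).Should(BeNumerically(">", 2))

	// Context-first shape: the context is recognized because the next
	// argument is not a parseable duration.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	g.Eventually(ctx, func() int { return counter }).Should(BeNumerically(">", 0))
}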
vendor/github.com/onsi/gomega/internal/gomega.go (37 lines changed; generated, vendored)

@@ -2,7 +2,6 @@ package internal
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	"github.com/onsi/gomega/types"
@@ -53,42 +52,38 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter
 	return NewAssertion(actual, g, offset, extra...)
 }
 
-func (g *Gomega) Eventually(args ...interface{}) types.AsyncAssertion {
-	return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, args...)
+func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
 }
 
-func (g *Gomega) EventuallyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
-	return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, args...)
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
 }
 
-func (g *Gomega) Consistently(args ...interface{}) types.AsyncAssertion {
-	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, args...)
+func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
 }
 
-func (g *Gomega) ConsistentlyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
-	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, args...)
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
 }
 
-func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
 	baseOffset := 3
 	timeoutInterval := -time.Duration(1)
 	pollingInterval := -time.Duration(1)
 	intervals := []interface{}{}
 	var ctx context.Context
-	if len(args) == 0 {
-		g.Fail(fmt.Sprintf("Call to %s is missing a value or function to poll", asyncAssertionType), offset+baseOffset)
-		return nil
-	}
 
-	actual := args[0]
-	startingIndex := 1
-	if _, isCtx := args[0].(context.Context); isCtx && len(args) > 1 {
+	actual := actualOrCtx
+	startingIndex := 0
+	if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
 		// the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
 		// this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
-		if _, err := toDuration(args[1]); err != nil {
-			ctx = args[0].(context.Context)
-			actual = args[1]
-			startingIndex = 2
+		if _, err := toDuration(args[0]); err != nil {
+			ctx = actualOrCtx.(context.Context)
+			actual = args[0]
+			startingIndex = 1
		}
 	}
 
vendor/github.com/onsi/gomega/types/types.go (8 lines changed; generated, vendored)

@@ -19,11 +19,11 @@ type Gomega interface {
 	Expect(actual interface{}, extra ...interface{}) Assertion
 	ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
 
-	Eventually(args ...interface{}) AsyncAssertion
-	EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion
+	Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
 
-	Consistently(args ...interface{}) AsyncAssertion
-	ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion
+	Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
 
 	SetDefaultEventuallyTimeout(time.Duration)
 	SetDefaultEventuallyPollingInterval(time.Duration)
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go (4 lines changed; generated, vendored)

@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
 // Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
 // Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
 // This software is licensed under a 3-clause BSD license. Please consult the
@@ -104,7 +104,7 @@ func (f *FileImage) writeDescriptors() error {
 	return binary.Write(f.rw, binary.LittleEndian, f.rds)
 }
 
-// writeHeader writes the the global header in f to backing storage.
+// writeHeader writes the global header in f to backing storage.
 func (f *FileImage) writeHeader() error {
 	if _, err := f.rw.Seek(0, io.SeekStart); err != nil {
 		return err
5 vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go (generated, vendored)
@ -214,6 +214,11 @@ func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) {
}

fp = make([]byte, 20)

if bytes.Equal(s.Entity[:len(fp)], fp) {
return ht, nil, nil // Fingerprint not present.
}

copy(fp, s.Entity[:])

return ht, fp, nil
11 vendor/golang.org/x/crypto/cast5/cast5.go (generated, vendored)
@ -13,7 +13,10 @@
// golang.org/x/crypto/chacha20poly1305).
package cast5 // import "golang.org/x/crypto/cast5"

import "errors"
import (
"errors"
"math/bits"
)

const BlockSize = 8
const KeySize = 16
@ -241,19 +244,19 @@ func (c *Cipher) keySchedule(in []byte) {
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}

func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}

func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
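The cast5 hunk above and the salsa20/sha3 hunks that follow all make the same mechanical substitution: the hand-written rotate idiom t<<r | t>>(32-r) is replaced by the math/bits rotate helpers. A tiny illustrative check (not part of the vendored code) that the two forms agree:

package main

import (
    "fmt"
    "math/bits"
)

func main() {
    t := uint32(0x80000001)
    r := 7
    manual := t<<r | t>>(32-r)         // the old hand-written rotation
    rotated := bits.RotateLeft32(t, r) // the replacement used throughout these diffs
    fmt.Println(manual == rotated)     // prints: true
}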
66
vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
generated
vendored
66
vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
generated
vendored
@ -5,6 +5,8 @@
|
||||
// Package salsa provides low-level access to functions in the Salsa family.
|
||||
package salsa // import "golang.org/x/crypto/salsa20/salsa"
|
||||
|
||||
import "math/bits"
|
||||
|
||||
// Sigma is the Salsa20 constant for 256-bit keys.
|
||||
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
|
||||
|
||||
@ -31,76 +33,76 @@ func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
|
||||
|
||||
for i := 0; i < 20; i += 2 {
|
||||
u := x0 + x12
|
||||
x4 ^= u<<7 | u>>(32-7)
|
||||
x4 ^= bits.RotateLeft32(u, 7)
|
||||
u = x4 + x0
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x4
|
||||
x12 ^= u<<13 | u>>(32-13)
|
||||
x12 ^= bits.RotateLeft32(u, 13)
|
||||
u = x12 + x8
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x1
|
||||
x9 ^= u<<7 | u>>(32-7)
|
||||
x9 ^= bits.RotateLeft32(u, 7)
|
||||
u = x9 + x5
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x9
|
||||
x1 ^= u<<13 | u>>(32-13)
|
||||
x1 ^= bits.RotateLeft32(u, 13)
|
||||
u = x1 + x13
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x6
|
||||
x14 ^= u<<7 | u>>(32-7)
|
||||
x14 ^= bits.RotateLeft32(u, 7)
|
||||
u = x14 + x10
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x14
|
||||
x6 ^= u<<13 | u>>(32-13)
|
||||
x6 ^= bits.RotateLeft32(u, 13)
|
||||
u = x6 + x2
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x11
|
||||
x3 ^= u<<7 | u>>(32-7)
|
||||
x3 ^= bits.RotateLeft32(u, 7)
|
||||
u = x3 + x15
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x3
|
||||
x11 ^= u<<13 | u>>(32-13)
|
||||
x11 ^= bits.RotateLeft32(u, 13)
|
||||
u = x11 + x7
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x0 + x3
|
||||
x1 ^= u<<7 | u>>(32-7)
|
||||
x1 ^= bits.RotateLeft32(u, 7)
|
||||
u = x1 + x0
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x1
|
||||
x3 ^= u<<13 | u>>(32-13)
|
||||
x3 ^= bits.RotateLeft32(u, 13)
|
||||
u = x3 + x2
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x4
|
||||
x6 ^= u<<7 | u>>(32-7)
|
||||
x6 ^= bits.RotateLeft32(u, 7)
|
||||
u = x6 + x5
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x6
|
||||
x4 ^= u<<13 | u>>(32-13)
|
||||
x4 ^= bits.RotateLeft32(u, 13)
|
||||
u = x4 + x7
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x9
|
||||
x11 ^= u<<7 | u>>(32-7)
|
||||
x11 ^= bits.RotateLeft32(u, 7)
|
||||
u = x11 + x10
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x11
|
||||
x9 ^= u<<13 | u>>(32-13)
|
||||
x9 ^= bits.RotateLeft32(u, 13)
|
||||
u = x9 + x8
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x14
|
||||
x12 ^= u<<7 | u>>(32-7)
|
||||
x12 ^= bits.RotateLeft32(u, 7)
|
||||
u = x12 + x15
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x12
|
||||
x14 ^= u<<13 | u>>(32-13)
|
||||
x14 ^= bits.RotateLeft32(u, 13)
|
||||
u = x14 + x13
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
}
|
||||
out[0] = byte(x0)
|
||||
out[1] = byte(x0 >> 8)
|
||||
|
66
vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
generated
vendored
66
vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
generated
vendored
@ -4,6 +4,8 @@
|
||||
|
||||
package salsa
|
||||
|
||||
import "math/bits"
|
||||
|
||||
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
|
||||
// the result into the 64-byte array out. The input and output may be the same array.
|
||||
func Core208(out *[64]byte, in *[64]byte) {
|
||||
@ -29,76 +31,76 @@ func Core208(out *[64]byte, in *[64]byte) {
|
||||
|
||||
for i := 0; i < 8; i += 2 {
|
||||
u := x0 + x12
|
||||
x4 ^= u<<7 | u>>(32-7)
|
||||
x4 ^= bits.RotateLeft32(u, 7)
|
||||
u = x4 + x0
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x4
|
||||
x12 ^= u<<13 | u>>(32-13)
|
||||
x12 ^= bits.RotateLeft32(u, 13)
|
||||
u = x12 + x8
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x1
|
||||
x9 ^= u<<7 | u>>(32-7)
|
||||
x9 ^= bits.RotateLeft32(u, 7)
|
||||
u = x9 + x5
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x9
|
||||
x1 ^= u<<13 | u>>(32-13)
|
||||
x1 ^= bits.RotateLeft32(u, 13)
|
||||
u = x1 + x13
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x6
|
||||
x14 ^= u<<7 | u>>(32-7)
|
||||
x14 ^= bits.RotateLeft32(u, 7)
|
||||
u = x14 + x10
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x14
|
||||
x6 ^= u<<13 | u>>(32-13)
|
||||
x6 ^= bits.RotateLeft32(u, 13)
|
||||
u = x6 + x2
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x11
|
||||
x3 ^= u<<7 | u>>(32-7)
|
||||
x3 ^= bits.RotateLeft32(u, 7)
|
||||
u = x3 + x15
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x3
|
||||
x11 ^= u<<13 | u>>(32-13)
|
||||
x11 ^= bits.RotateLeft32(u, 13)
|
||||
u = x11 + x7
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x0 + x3
|
||||
x1 ^= u<<7 | u>>(32-7)
|
||||
x1 ^= bits.RotateLeft32(u, 7)
|
||||
u = x1 + x0
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x1
|
||||
x3 ^= u<<13 | u>>(32-13)
|
||||
x3 ^= bits.RotateLeft32(u, 13)
|
||||
u = x3 + x2
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x4
|
||||
x6 ^= u<<7 | u>>(32-7)
|
||||
x6 ^= bits.RotateLeft32(u, 7)
|
||||
u = x6 + x5
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x6
|
||||
x4 ^= u<<13 | u>>(32-13)
|
||||
x4 ^= bits.RotateLeft32(u, 13)
|
||||
u = x4 + x7
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x9
|
||||
x11 ^= u<<7 | u>>(32-7)
|
||||
x11 ^= bits.RotateLeft32(u, 7)
|
||||
u = x11 + x10
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x11
|
||||
x9 ^= u<<13 | u>>(32-13)
|
||||
x9 ^= bits.RotateLeft32(u, 13)
|
||||
u = x9 + x8
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x14
|
||||
x12 ^= u<<7 | u>>(32-7)
|
||||
x12 ^= bits.RotateLeft32(u, 7)
|
||||
u = x12 + x15
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x12
|
||||
x14 ^= u<<13 | u>>(32-13)
|
||||
x14 ^= bits.RotateLeft32(u, 13)
|
||||
u = x14 + x13
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
}
|
||||
x0 += j0
|
||||
x1 += j1
|
||||
|
66
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
generated
vendored
66
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
generated
vendored
@ -4,6 +4,8 @@
|
||||
|
||||
package salsa
|
||||
|
||||
import "math/bits"
|
||||
|
||||
const rounds = 20
|
||||
|
||||
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
|
||||
@ -31,76 +33,76 @@ func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
|
||||
|
||||
for i := 0; i < rounds; i += 2 {
|
||||
u := x0 + x12
|
||||
x4 ^= u<<7 | u>>(32-7)
|
||||
x4 ^= bits.RotateLeft32(u, 7)
|
||||
u = x4 + x0
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x4
|
||||
x12 ^= u<<13 | u>>(32-13)
|
||||
x12 ^= bits.RotateLeft32(u, 13)
|
||||
u = x12 + x8
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x1
|
||||
x9 ^= u<<7 | u>>(32-7)
|
||||
x9 ^= bits.RotateLeft32(u, 7)
|
||||
u = x9 + x5
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x9
|
||||
x1 ^= u<<13 | u>>(32-13)
|
||||
x1 ^= bits.RotateLeft32(u, 13)
|
||||
u = x1 + x13
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x6
|
||||
x14 ^= u<<7 | u>>(32-7)
|
||||
x14 ^= bits.RotateLeft32(u, 7)
|
||||
u = x14 + x10
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x14
|
||||
x6 ^= u<<13 | u>>(32-13)
|
||||
x6 ^= bits.RotateLeft32(u, 13)
|
||||
u = x6 + x2
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x11
|
||||
x3 ^= u<<7 | u>>(32-7)
|
||||
x3 ^= bits.RotateLeft32(u, 7)
|
||||
u = x3 + x15
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x3
|
||||
x11 ^= u<<13 | u>>(32-13)
|
||||
x11 ^= bits.RotateLeft32(u, 13)
|
||||
u = x11 + x7
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x0 + x3
|
||||
x1 ^= u<<7 | u>>(32-7)
|
||||
x1 ^= bits.RotateLeft32(u, 7)
|
||||
u = x1 + x0
|
||||
x2 ^= u<<9 | u>>(32-9)
|
||||
x2 ^= bits.RotateLeft32(u, 9)
|
||||
u = x2 + x1
|
||||
x3 ^= u<<13 | u>>(32-13)
|
||||
x3 ^= bits.RotateLeft32(u, 13)
|
||||
u = x3 + x2
|
||||
x0 ^= u<<18 | u>>(32-18)
|
||||
x0 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x5 + x4
|
||||
x6 ^= u<<7 | u>>(32-7)
|
||||
x6 ^= bits.RotateLeft32(u, 7)
|
||||
u = x6 + x5
|
||||
x7 ^= u<<9 | u>>(32-9)
|
||||
x7 ^= bits.RotateLeft32(u, 9)
|
||||
u = x7 + x6
|
||||
x4 ^= u<<13 | u>>(32-13)
|
||||
x4 ^= bits.RotateLeft32(u, 13)
|
||||
u = x4 + x7
|
||||
x5 ^= u<<18 | u>>(32-18)
|
||||
x5 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x10 + x9
|
||||
x11 ^= u<<7 | u>>(32-7)
|
||||
x11 ^= bits.RotateLeft32(u, 7)
|
||||
u = x11 + x10
|
||||
x8 ^= u<<9 | u>>(32-9)
|
||||
x8 ^= bits.RotateLeft32(u, 9)
|
||||
u = x8 + x11
|
||||
x9 ^= u<<13 | u>>(32-13)
|
||||
x9 ^= bits.RotateLeft32(u, 13)
|
||||
u = x9 + x8
|
||||
x10 ^= u<<18 | u>>(32-18)
|
||||
x10 ^= bits.RotateLeft32(u, 18)
|
||||
|
||||
u = x15 + x14
|
||||
x12 ^= u<<7 | u>>(32-7)
|
||||
x12 ^= bits.RotateLeft32(u, 7)
|
||||
u = x12 + x15
|
||||
x13 ^= u<<9 | u>>(32-9)
|
||||
x13 ^= bits.RotateLeft32(u, 9)
|
||||
u = x13 + x12
|
||||
x14 ^= u<<13 | u>>(32-13)
|
||||
x14 ^= bits.RotateLeft32(u, 13)
|
||||
u = x14 + x13
|
||||
x15 ^= u<<18 | u>>(32-18)
|
||||
x15 ^= bits.RotateLeft32(u, 18)
|
||||
}
|
||||
x0 += j0
|
||||
x1 += j1
|
||||
|
194
vendor/golang.org/x/crypto/sha3/keccakf.go
generated
vendored
194
vendor/golang.org/x/crypto/sha3/keccakf.go
generated
vendored
@ -7,6 +7,8 @@
|
||||
|
||||
package sha3
|
||||
|
||||
import "math/bits"
|
||||
|
||||
// rc stores the round constants for use in the ι step.
|
||||
var rc = [24]uint64{
|
||||
0x0000000000000001,
|
||||
@ -60,13 +62,13 @@ func keccakF1600(a *[25]uint64) {
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[6] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[12] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[18] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[24] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -74,15 +76,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[16] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[22] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[3] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -90,15 +92,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[1] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[7] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[19] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -106,15 +108,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[11] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[23] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[4] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -122,15 +124,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[2] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[8] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[14] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -151,13 +153,13 @@ func keccakF1600(a *[25]uint64) {
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[16] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[7] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[23] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[14] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -165,15 +167,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[11] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[2] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[18] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -181,15 +183,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[6] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[22] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[4] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -197,15 +199,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[1] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[8] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[24] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -213,15 +215,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[12] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[3] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[19] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -242,13 +244,13 @@ func keccakF1600(a *[25]uint64) {
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[11] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[22] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[8] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[19] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -256,15 +258,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[1] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[12] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[23] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -272,15 +274,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[16] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[2] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[24] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -288,15 +290,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[6] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[3] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[14] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -304,15 +306,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[7] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[18] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[4] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -333,13 +335,13 @@ func keccakF1600(a *[25]uint64) {
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[1] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[2] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[3] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[4] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -347,15 +349,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[6] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[7] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[8] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -363,15 +365,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[11] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[12] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[14] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -379,15 +381,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[16] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[18] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[19] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
@ -395,15 +397,15 @@ func keccakF1600(a *[25]uint64) {
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[22] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[23] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[24] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
|
2 vendor/golang.org/x/crypto/ssh/messages.go (generated, vendored)
@ -68,7 +68,7 @@ type kexInitMsg struct {

// See RFC 4253, section 8.

// Diffie-Helman
// Diffie-Hellman
const msgKexDHInit = 30

type kexDHInitMsg struct {
8 vendor/golang.org/x/net/html/token.go (generated, vendored)
@ -605,7 +605,10 @@ func (z *Tokenizer) readComment() {
z.data.end = z.data.start
}
}()
for dashCount := 2; ; {

var dashCount int
beginning := true
for {
c := z.readByte()
if z.err != nil {
// Ignore up to two dashes at EOF.
@ -620,7 +623,7 @@ func (z *Tokenizer) readComment() {
dashCount++
continue
case '>':
if dashCount >= 2 {
if dashCount >= 2 || beginning {
z.data.end = z.raw.end - len("-->")
return
}
@ -638,6 +641,7 @@ func (z *Tokenizer) readComment() {
}
}
dashCount = 0
beginning = false
}
}
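The readComment change above tracks whether the tokenizer is still at the very beginning of a comment, so an abruptly closed comment such as "<!-->" terminates cleanly instead of swallowing the text after it. A small sketch of exercising that path (behavior as described in the hunk; the exact output shape is an assumption):

package main

import (
    "fmt"
    "strings"

    "golang.org/x/net/html"
)

func main() {
    z := html.NewTokenizer(strings.NewReader("<!-->hello"))
    for {
        tt := z.Next()
        if tt == html.ErrorToken {
            return // io.EOF once the input is exhausted
        }
        // With the patched tokenizer the "<!-->" prefix is reported as an
        // empty comment token, followed by a text token for "hello".
        fmt.Printf("%v: %q\n", tt, z.Token().Data)
    }
}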
18
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
18
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
@ -27,7 +27,14 @@ func buildCommonHeaderMaps() {
|
||||
"accept-language",
|
||||
"accept-ranges",
|
||||
"age",
|
||||
"access-control-allow-credentials",
|
||||
"access-control-allow-headers",
|
||||
"access-control-allow-methods",
|
||||
"access-control-allow-origin",
|
||||
"access-control-expose-headers",
|
||||
"access-control-max-age",
|
||||
"access-control-request-headers",
|
||||
"access-control-request-method",
|
||||
"allow",
|
||||
"authorization",
|
||||
"cache-control",
|
||||
@ -53,6 +60,7 @@ func buildCommonHeaderMaps() {
|
||||
"link",
|
||||
"location",
|
||||
"max-forwards",
|
||||
"origin",
|
||||
"proxy-authenticate",
|
||||
"proxy-authorization",
|
||||
"range",
|
||||
@ -68,6 +76,8 @@ func buildCommonHeaderMaps() {
|
||||
"vary",
|
||||
"via",
|
||||
"www-authenticate",
|
||||
"x-forwarded-for",
|
||||
"x-forwarded-proto",
|
||||
}
|
||||
commonLowerHeader = make(map[string]string, len(common))
|
||||
commonCanonHeader = make(map[string]string, len(common))
|
||||
@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) {
|
||||
}
|
||||
return asciiToLower(v)
|
||||
}
|
||||
|
||||
func canonicalHeader(v string) string {
|
||||
buildCommonHeaderMapsOnce()
|
||||
if s, ok := commonCanonHeader[v]; ok {
|
||||
return s
|
||||
}
|
||||
return http.CanonicalHeaderKey(v)
|
||||
}
|
||||
|
188
vendor/golang.org/x/net/http2/hpack/static_table.go
generated
vendored
Normal file
188
vendor/golang.org/x/net/http2/hpack/static_table.go
generated
vendored
Normal file
@ -0,0 +1,188 @@
|
||||
// go generate gen.go
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
package hpack
|
||||
|
||||
var staticTable = &headerFieldTable{
|
||||
evictCount: 0,
|
||||
byName: map[string]uint64{
|
||||
":authority": 1,
|
||||
":method": 3,
|
||||
":path": 5,
|
||||
":scheme": 7,
|
||||
":status": 14,
|
||||
"accept-charset": 15,
|
||||
"accept-encoding": 16,
|
||||
"accept-language": 17,
|
||||
"accept-ranges": 18,
|
||||
"accept": 19,
|
||||
"access-control-allow-origin": 20,
|
||||
"age": 21,
|
||||
"allow": 22,
|
||||
"authorization": 23,
|
||||
"cache-control": 24,
|
||||
"content-disposition": 25,
|
||||
"content-encoding": 26,
|
||||
"content-language": 27,
|
||||
"content-length": 28,
|
||||
"content-location": 29,
|
||||
"content-range": 30,
|
||||
"content-type": 31,
|
||||
"cookie": 32,
|
||||
"date": 33,
|
||||
"etag": 34,
|
||||
"expect": 35,
|
||||
"expires": 36,
|
||||
"from": 37,
|
||||
"host": 38,
|
||||
"if-match": 39,
|
||||
"if-modified-since": 40,
|
||||
"if-none-match": 41,
|
||||
"if-range": 42,
|
||||
"if-unmodified-since": 43,
|
||||
"last-modified": 44,
|
||||
"link": 45,
|
||||
"location": 46,
|
||||
"max-forwards": 47,
|
||||
"proxy-authenticate": 48,
|
||||
"proxy-authorization": 49,
|
||||
"range": 50,
|
||||
"referer": 51,
|
||||
"refresh": 52,
|
||||
"retry-after": 53,
|
||||
"server": 54,
|
||||
"set-cookie": 55,
|
||||
"strict-transport-security": 56,
|
||||
"transfer-encoding": 57,
|
||||
"user-agent": 58,
|
||||
"vary": 59,
|
||||
"via": 60,
|
||||
"www-authenticate": 61,
|
||||
},
|
||||
byNameValue: map[pairNameValue]uint64{
|
||||
{name: ":authority", value: ""}: 1,
|
||||
{name: ":method", value: "GET"}: 2,
|
||||
{name: ":method", value: "POST"}: 3,
|
||||
{name: ":path", value: "/"}: 4,
|
||||
{name: ":path", value: "/index.html"}: 5,
|
||||
{name: ":scheme", value: "http"}: 6,
|
||||
{name: ":scheme", value: "https"}: 7,
|
||||
{name: ":status", value: "200"}: 8,
|
||||
{name: ":status", value: "204"}: 9,
|
||||
{name: ":status", value: "206"}: 10,
|
||||
{name: ":status", value: "304"}: 11,
|
||||
{name: ":status", value: "400"}: 12,
|
||||
{name: ":status", value: "404"}: 13,
|
||||
{name: ":status", value: "500"}: 14,
|
||||
{name: "accept-charset", value: ""}: 15,
|
||||
{name: "accept-encoding", value: "gzip, deflate"}: 16,
|
||||
{name: "accept-language", value: ""}: 17,
|
||||
{name: "accept-ranges", value: ""}: 18,
|
||||
{name: "accept", value: ""}: 19,
|
||||
{name: "access-control-allow-origin", value: ""}: 20,
|
||||
{name: "age", value: ""}: 21,
|
||||
{name: "allow", value: ""}: 22,
|
||||
{name: "authorization", value: ""}: 23,
|
||||
{name: "cache-control", value: ""}: 24,
|
||||
{name: "content-disposition", value: ""}: 25,
|
||||
{name: "content-encoding", value: ""}: 26,
|
||||
{name: "content-language", value: ""}: 27,
|
||||
{name: "content-length", value: ""}: 28,
|
||||
{name: "content-location", value: ""}: 29,
|
||||
{name: "content-range", value: ""}: 30,
|
||||
{name: "content-type", value: ""}: 31,
|
||||
{name: "cookie", value: ""}: 32,
|
||||
{name: "date", value: ""}: 33,
|
||||
{name: "etag", value: ""}: 34,
|
||||
{name: "expect", value: ""}: 35,
|
||||
{name: "expires", value: ""}: 36,
|
||||
{name: "from", value: ""}: 37,
|
||||
{name: "host", value: ""}: 38,
|
||||
{name: "if-match", value: ""}: 39,
|
||||
{name: "if-modified-since", value: ""}: 40,
|
||||
{name: "if-none-match", value: ""}: 41,
|
||||
{name: "if-range", value: ""}: 42,
|
||||
{name: "if-unmodified-since", value: ""}: 43,
|
||||
{name: "last-modified", value: ""}: 44,
|
||||
{name: "link", value: ""}: 45,
|
||||
{name: "location", value: ""}: 46,
|
||||
{name: "max-forwards", value: ""}: 47,
|
||||
{name: "proxy-authenticate", value: ""}: 48,
|
||||
{name: "proxy-authorization", value: ""}: 49,
|
||||
{name: "range", value: ""}: 50,
|
||||
{name: "referer", value: ""}: 51,
|
||||
{name: "refresh", value: ""}: 52,
|
||||
{name: "retry-after", value: ""}: 53,
|
||||
{name: "server", value: ""}: 54,
|
||||
{name: "set-cookie", value: ""}: 55,
|
||||
{name: "strict-transport-security", value: ""}: 56,
|
||||
{name: "transfer-encoding", value: ""}: 57,
|
||||
{name: "user-agent", value: ""}: 58,
|
||||
{name: "vary", value: ""}: 59,
|
||||
{name: "via", value: ""}: 60,
|
||||
{name: "www-authenticate", value: ""}: 61,
|
||||
},
|
||||
ents: []HeaderField{
|
||||
{Name: ":authority", Value: "", Sensitive: false},
|
||||
{Name: ":method", Value: "GET", Sensitive: false},
|
||||
{Name: ":method", Value: "POST", Sensitive: false},
|
||||
{Name: ":path", Value: "/", Sensitive: false},
|
||||
{Name: ":path", Value: "/index.html", Sensitive: false},
|
||||
{Name: ":scheme", Value: "http", Sensitive: false},
|
||||
{Name: ":scheme", Value: "https", Sensitive: false},
|
||||
{Name: ":status", Value: "200", Sensitive: false},
|
||||
{Name: ":status", Value: "204", Sensitive: false},
|
||||
{Name: ":status", Value: "206", Sensitive: false},
|
||||
{Name: ":status", Value: "304", Sensitive: false},
|
||||
{Name: ":status", Value: "400", Sensitive: false},
|
||||
{Name: ":status", Value: "404", Sensitive: false},
|
||||
{Name: ":status", Value: "500", Sensitive: false},
|
||||
{Name: "accept-charset", Value: "", Sensitive: false},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false},
|
||||
{Name: "accept-language", Value: "", Sensitive: false},
|
||||
{Name: "accept-ranges", Value: "", Sensitive: false},
|
||||
{Name: "accept", Value: "", Sensitive: false},
|
||||
{Name: "access-control-allow-origin", Value: "", Sensitive: false},
|
||||
{Name: "age", Value: "", Sensitive: false},
|
||||
{Name: "allow", Value: "", Sensitive: false},
|
||||
{Name: "authorization", Value: "", Sensitive: false},
|
||||
{Name: "cache-control", Value: "", Sensitive: false},
|
||||
{Name: "content-disposition", Value: "", Sensitive: false},
|
||||
{Name: "content-encoding", Value: "", Sensitive: false},
|
||||
{Name: "content-language", Value: "", Sensitive: false},
|
||||
{Name: "content-length", Value: "", Sensitive: false},
|
||||
{Name: "content-location", Value: "", Sensitive: false},
|
||||
{Name: "content-range", Value: "", Sensitive: false},
|
||||
{Name: "content-type", Value: "", Sensitive: false},
|
||||
{Name: "cookie", Value: "", Sensitive: false},
|
||||
{Name: "date", Value: "", Sensitive: false},
|
||||
{Name: "etag", Value: "", Sensitive: false},
|
||||
{Name: "expect", Value: "", Sensitive: false},
|
||||
{Name: "expires", Value: "", Sensitive: false},
|
||||
{Name: "from", Value: "", Sensitive: false},
|
||||
{Name: "host", Value: "", Sensitive: false},
|
||||
{Name: "if-match", Value: "", Sensitive: false},
|
||||
{Name: "if-modified-since", Value: "", Sensitive: false},
|
||||
{Name: "if-none-match", Value: "", Sensitive: false},
|
||||
{Name: "if-range", Value: "", Sensitive: false},
|
||||
{Name: "if-unmodified-since", Value: "", Sensitive: false},
|
||||
{Name: "last-modified", Value: "", Sensitive: false},
|
||||
{Name: "link", Value: "", Sensitive: false},
|
||||
{Name: "location", Value: "", Sensitive: false},
|
||||
{Name: "max-forwards", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authenticate", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authorization", Value: "", Sensitive: false},
|
||||
{Name: "range", Value: "", Sensitive: false},
|
||||
{Name: "referer", Value: "", Sensitive: false},
|
||||
{Name: "refresh", Value: "", Sensitive: false},
|
||||
{Name: "retry-after", Value: "", Sensitive: false},
|
||||
{Name: "server", Value: "", Sensitive: false},
|
||||
{Name: "set-cookie", Value: "", Sensitive: false},
|
||||
{Name: "strict-transport-security", Value: "", Sensitive: false},
|
||||
{Name: "transfer-encoding", Value: "", Sensitive: false},
|
||||
{Name: "user-agent", Value: "", Sensitive: false},
|
||||
{Name: "vary", Value: "", Sensitive: false},
|
||||
{Name: "via", Value: "", Sensitive: false},
|
||||
{Name: "www-authenticate", Value: "", Sensitive: false},
|
||||
},
|
||||
}
|
78
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
78
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) {
|
||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
||||
//
|
||||
// All tables are assumed to be a dynamic tables except for the global
|
||||
// staticTable pointer.
|
||||
// All tables are assumed to be a dynamic tables except for the global staticTable.
|
||||
//
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
||||
return k + 1
|
||||
}
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = newStaticTable()
|
||||
var staticTableEntries = [...]HeaderField{
|
||||
{Name: ":authority"},
|
||||
{Name: ":method", Value: "GET"},
|
||||
{Name: ":method", Value: "POST"},
|
||||
{Name: ":path", Value: "/"},
|
||||
{Name: ":path", Value: "/index.html"},
|
||||
{Name: ":scheme", Value: "http"},
|
||||
{Name: ":scheme", Value: "https"},
|
||||
{Name: ":status", Value: "200"},
|
||||
{Name: ":status", Value: "204"},
|
||||
{Name: ":status", Value: "206"},
|
||||
{Name: ":status", Value: "304"},
|
||||
{Name: ":status", Value: "400"},
|
||||
{Name: ":status", Value: "404"},
|
||||
{Name: ":status", Value: "500"},
|
||||
{Name: "accept-charset"},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate"},
|
||||
{Name: "accept-language"},
|
||||
{Name: "accept-ranges"},
|
||||
{Name: "accept"},
|
||||
{Name: "access-control-allow-origin"},
|
||||
{Name: "age"},
|
||||
{Name: "allow"},
|
||||
{Name: "authorization"},
|
||||
{Name: "cache-control"},
|
||||
{Name: "content-disposition"},
|
||||
{Name: "content-encoding"},
|
||||
{Name: "content-language"},
|
||||
{Name: "content-length"},
|
||||
{Name: "content-location"},
|
||||
{Name: "content-range"},
|
||||
{Name: "content-type"},
|
||||
{Name: "cookie"},
|
||||
{Name: "date"},
|
||||
{Name: "etag"},
|
||||
{Name: "expect"},
|
||||
{Name: "expires"},
|
||||
{Name: "from"},
|
||||
{Name: "host"},
|
||||
{Name: "if-match"},
|
||||
{Name: "if-modified-since"},
|
||||
{Name: "if-none-match"},
|
||||
{Name: "if-range"},
|
||||
{Name: "if-unmodified-since"},
|
||||
{Name: "last-modified"},
|
||||
{Name: "link"},
|
||||
{Name: "location"},
|
||||
{Name: "max-forwards"},
|
||||
{Name: "proxy-authenticate"},
|
||||
{Name: "proxy-authorization"},
|
||||
{Name: "range"},
|
||||
{Name: "referer"},
|
||||
{Name: "refresh"},
|
||||
{Name: "retry-after"},
|
||||
{Name: "server"},
|
||||
{Name: "set-cookie"},
|
||||
{Name: "strict-transport-security"},
|
||||
{Name: "transfer-encoding"},
|
||||
{Name: "user-agent"},
|
||||
{Name: "vary"},
|
||||
{Name: "via"},
|
||||
{Name: "www-authenticate"},
|
||||
}
|
||||
|
||||
func newStaticTable() *headerFieldTable {
|
||||
t := &headerFieldTable{}
|
||||
t.init()
|
||||
for _, e := range staticTableEntries[:] {
|
||||
t.addEntry(e)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
|
184
vendor/golang.org/x/net/http2/server.go
generated
vendored
184
vendor/golang.org/x/net/http2/server.go
generated
vendored
@ -622,7 +622,9 @@ type stream struct {
|
||||
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
||||
gotTrailerHeader bool // HEADER frame for trailers was seen
|
||||
wroteHeaders bool // whether we wrote headers (not status 100)
|
||||
readDeadline *time.Timer // nil if unused
|
||||
writeDeadline *time.Timer // nil if unused
|
||||
closeErr error // set before cw is closed
|
||||
|
||||
trailer http.Header // accumulated trailers
|
||||
reqTrailer http.Header // handler's Request.Trailer
|
||||
@ -869,7 +871,9 @@ func (sc *serverConn) serve() {
|
||||
|
||||
// Each connection starts with initialWindowSize inflow tokens.
|
||||
// If a higher value is configured, we add more tokens.
|
||||
sc.sendWindowUpdate(nil)
|
||||
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
|
||||
sc.sendWindowUpdate(nil, int(diff))
|
||||
}
|
||||
|
||||
if err := sc.readPreface(); err != nil {
|
||||
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
|
||||
@ -946,6 +950,8 @@ func (sc *serverConn) serve() {
|
||||
}
|
||||
case *startPushRequest:
|
||||
sc.startPush(v)
|
||||
case func(*serverConn):
|
||||
v(sc)
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected type %T", v))
|
||||
}
|
||||
@ -1459,6 +1465,21 @@ func (sc *serverConn) processFrame(f Frame) error {
|
||||
sc.sawFirstSettings = true
|
||||
}
|
||||
|
||||
// Discard frames for streams initiated after the identified last
|
||||
// stream sent in a GOAWAY, or all frames after sending an error.
|
||||
// We still need to return connection-level flow control for DATA frames.
|
||||
// RFC 9113 Section 6.8.
|
||||
if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
|
||||
|
||||
if f, ok := f.(*DataFrame); ok {
|
||||
if sc.inflow.available() < int32(f.Length) {
|
||||
return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
|
||||
}
|
||||
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
switch f := f.(type) {
|
||||
case *SettingsFrame:
|
||||
return sc.processSettings(f)
|
||||
@ -1501,9 +1522,6 @@ func (sc *serverConn) processPing(f *PingFrame) error {
|
||||
// PROTOCOL_ERROR."
|
||||
return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
|
||||
}
|
||||
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
|
||||
return nil
|
||||
}
|
||||
sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
|
||||
return nil
|
||||
}
|
||||
@ -1565,6 +1583,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
|
||||
}
|
||||
st.state = stateClosed
|
||||
if st.readDeadline != nil {
|
||||
st.readDeadline.Stop()
|
||||
}
|
||||
if st.writeDeadline != nil {
|
||||
st.writeDeadline.Stop()
|
||||
}
|
||||
@ -1586,10 +1607,18 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||
if p := st.body; p != nil {
|
||||
// Return any buffered unread bytes worth of conn-level flow control.
|
||||
// See golang.org/issue/16481
|
||||
sc.sendWindowUpdate(nil)
|
||||
sc.sendWindowUpdate(nil, p.Len())
|
||||
|
||||
p.CloseWithError(err)
|
||||
}
|
||||
if e, ok := err.(StreamError); ok {
|
||||
if e.Cause != nil {
|
||||
err = e.Cause
|
||||
} else {
|
||||
err = errStreamClosed
|
||||
}
|
||||
}
|
||||
st.closeErr = err
|
||||
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
|
||||
sc.writeSched.CloseStream(st.id)
|
||||
}
|
||||
@ -1686,16 +1715,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
|
||||
func (sc *serverConn) processData(f *DataFrame) error {
|
||||
sc.serveG.check()
|
||||
id := f.Header().StreamID
|
||||
if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) {
|
||||
// Discard all DATA frames if the GOAWAY is due to an
|
||||
// error, or:
|
||||
//
|
||||
// Section 6.8: After sending a GOAWAY frame, the sender
|
||||
// can discard frames for streams initiated by the
|
||||
// receiver with identifiers higher than the identified
|
||||
// last stream.
|
||||
return nil
|
||||
}
|
||||
|
||||
data := f.Data()
|
||||
state, st := sc.state(id)
|
||||
@ -1734,7 +1753,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
||||
// sendWindowUpdate, which also schedules sending the
|
||||
// frames.
|
||||
sc.inflow.take(int32(f.Length))
|
||||
sc.sendWindowUpdate(nil) // conn-level
|
||||
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
|
||||
|
||||
if st != nil && st.resetQueued {
|
||||
// Already have a stream error in flight. Don't send another.
|
||||
@ -1752,7 +1771,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
||||
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
|
||||
}
|
||||
sc.inflow.take(int32(f.Length))
|
||||
sc.sendWindowUpdate(nil) // conn-level
|
||||
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
|
||||
|
||||
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
|
||||
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
|
||||
@ -1770,7 +1789,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
||||
if len(data) > 0 {
|
||||
wrote, err := st.body.Write(data)
|
||||
if err != nil {
|
||||
sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote))
|
||||
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
|
||||
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
|
||||
}
|
||||
if wrote != len(data) {
|
||||
@ -1838,19 +1857,27 @@ func (st *stream) copyTrailersToHandlerRequest() {
|
||||
}
|
||||
}
|
||||
|
||||
// onReadTimeout is run on its own goroutine (from time.AfterFunc)
|
||||
// when the stream's ReadTimeout has fired.
|
||||
func (st *stream) onReadTimeout() {
|
||||
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
|
||||
// returning the bare error.
|
||||
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
|
||||
}
|
||||
|
||||
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
|
||||
// when the stream's WriteTimeout has fired.
|
||||
func (st *stream) onWriteTimeout() {
|
||||
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
|
||||
st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
|
||||
StreamID: st.id,
|
||||
Code: ErrCodeInternal,
|
||||
Cause: os.ErrDeadlineExceeded,
|
||||
}})
|
||||
}
|
||||
|
||||
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
||||
sc.serveG.check()
|
||||
id := f.StreamID
|
||||
if sc.inGoAway {
|
||||
// Ignore.
|
||||
return nil
|
||||
}
|
||||
// http://tools.ietf.org/html/rfc7540#section-5.1.1
|
||||
// Streams initiated by a client MUST use odd-numbered stream
|
||||
// identifiers. [...] An endpoint that receives an unexpected
|
||||
@ -1953,6 +1980,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
||||
// (in Go 1.8), though. That's a more sane option anyway.
|
||||
if sc.hs.ReadTimeout != 0 {
|
||||
sc.conn.SetReadDeadline(time.Time{})
|
||||
if st.body != nil {
|
||||
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
go sc.runHandler(rw, req, handler)
|
||||
@ -2021,9 +2051,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
|
||||
}
|
||||
|
||||
func (sc *serverConn) processPriority(f *PriorityFrame) error {
|
||||
if sc.inGoAway {
|
||||
return nil
|
||||
}
|
||||
if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2322,39 +2349,24 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
|
||||
|
||||
func (sc *serverConn) noteBodyRead(st *stream, n int) {
|
||||
sc.serveG.check()
|
||||
sc.sendWindowUpdate(nil) // conn-level
|
||||
sc.sendWindowUpdate(nil, n) // conn-level
|
||||
if st.state != stateHalfClosedRemote && st.state != stateClosed {
|
||||
// Don't send this WINDOW_UPDATE if the stream is closed
|
||||
// remotely.
|
||||
sc.sendWindowUpdate(st)
|
||||
sc.sendWindowUpdate(st, n)
|
||||
}
|
||||
}
|
||||
|
||||
// st may be nil for conn-level
|
||||
func (sc *serverConn) sendWindowUpdate(st *stream) {
|
||||
func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
|
||||
sc.serveG.check()
|
||||
|
||||
var n int32
|
||||
if st == nil {
|
||||
if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 {
|
||||
return
|
||||
} else {
|
||||
n = windowSize - avail
|
||||
}
|
||||
} else {
|
||||
if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 {
|
||||
return
|
||||
} else {
|
||||
n = windowSize - avail
|
||||
}
|
||||
}
|
||||
// "The legal range for the increment to the flow control
|
||||
// window is 1 to 2^31-1 (2,147,483,647) octets."
|
||||
// A Go Read call on 64-bit machines could in theory read
|
||||
// a larger Read than this. Very unlikely, but we handle it here
|
||||
// rather than elsewhere for now.
|
||||
const maxUint31 = 1<<31 - 1
|
||||
for n >= maxUint31 {
|
||||
for n > maxUint31 {
|
||||
sc.sendWindowUpdate32(st, maxUint31)
|
||||
n -= maxUint31
|
||||
}
|
||||
@ -2474,7 +2486,15 @@ type responseWriterState struct {
|
||||
|
||||
type chunkWriter struct{ rws *responseWriterState }
|
||||
|
||||
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
|
||||
func (cw chunkWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = cw.rws.writeChunk(p)
|
||||
if err == errStreamClosed {
|
||||
// If writing failed because the stream has been closed,
|
||||
// return the reason it was closed.
|
||||
err = cw.rws.stream.closeErr
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
|
||||
|
||||
@ -2668,23 +2688,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
||||
st := w.rws.stream
|
||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
||||
// If we're setting a deadline in the past, reset the stream immediately
|
||||
// so writes after SetWriteDeadline returns will fail.
|
||||
st.onReadTimeout()
|
||||
return nil
|
||||
}
|
||||
w.rws.conn.sendServeMsg(func(sc *serverConn) {
|
||||
if st.readDeadline != nil {
|
||||
if !st.readDeadline.Stop() {
|
||||
// Deadline already exceeded, or stream has been closed.
|
||||
return
|
||||
}
|
||||
}
|
||||
if deadline.IsZero() {
|
||||
st.readDeadline = nil
|
||||
} else if st.readDeadline == nil {
|
||||
st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
|
||||
} else {
|
||||
st.readDeadline.Reset(deadline.Sub(time.Now()))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
||||
st := w.rws.stream
|
||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
||||
// If we're setting a deadline in the past, reset the stream immediately
|
||||
// so writes after SetWriteDeadline returns will fail.
|
||||
st.onWriteTimeout()
|
||||
return nil
|
||||
}
|
||||
w.rws.conn.sendServeMsg(func(sc *serverConn) {
|
||||
if st.writeDeadline != nil {
|
||||
if !st.writeDeadline.Stop() {
|
||||
// Deadline already exceeded, or stream has been closed.
|
||||
return
|
||||
}
|
||||
}
|
||||
if deadline.IsZero() {
|
||||
st.writeDeadline = nil
|
||||
} else if st.writeDeadline == nil {
|
||||
st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
|
||||
} else {
|
||||
st.writeDeadline.Reset(deadline.Sub(time.Now()))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *responseWriter) Flush() {
|
||||
w.FlushError()
|
||||
}
|
||||
|
||||
func (w *responseWriter) FlushError() error {
|
||||
rws := w.rws
|
||||
if rws == nil {
|
||||
panic("Header called after Handler finished")
|
||||
}
|
||||
var err error
|
||||
if rws.bw.Buffered() > 0 {
|
||||
if err := rws.bw.Flush(); err != nil {
|
||||
// Ignore the error. The frame writer already knows.
|
||||
return
|
||||
}
|
||||
err = rws.bw.Flush()
|
||||
} else {
|
||||
// The bufio.Writer won't call chunkWriter.Write
|
||||
// (writeChunk with zero bytes, so we have to do it
|
||||
// ourselves to force the HTTP response header and/or
|
||||
// final DATA frame (with END_STREAM) to be sent.
|
||||
rws.writeChunk(nil)
|
||||
_, err = chunkWriter{rws}.Write(nil)
|
||||
if err == nil {
|
||||
select {
|
||||
case <-rws.stream.cw:
|
||||
err = rws.stream.closeErr
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *responseWriter) CloseNotify() <-chan bool {
|
||||
|
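Among the server.go changes above, the response writer gains SetReadDeadline and SetWriteDeadline methods for per-stream deadlines. A hedged sketch (not from this diff) of how a handler could probe for the new method through an interface assertion; the route and timeout values are arbitrary:

package main

import (
    "net/http"
    "time"
)

func handler(w http.ResponseWriter, r *http.Request) {
    // The HTTP/2 response writer now implements SetWriteDeadline; other
    // writers may not, so the assertion keeps the handler portable.
    if d, ok := w.(interface{ SetWriteDeadline(time.Time) error }); ok {
        _ = d.SetWriteDeadline(time.Now().Add(10 * time.Second))
    }
    _, _ = w.Write([]byte("ok"))
}

func main() {
    http.HandleFunc("/", handler)
    // Plain ListenAndServe keeps the sketch short; serving HTTP/2 normally
    // requires TLS (ListenAndServeTLS) or an h2c setup.
    _ = http.ListenAndServe(":8080", nil)
}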
35
vendor/golang.org/x/net/http2/transport.go
generated
vendored
35
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@ -16,6 +16,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"math"
|
||||
mathrand "math/rand"
|
||||
@ -501,6 +502,15 @@ func authorityAddr(scheme string, authority string) (addr string) {
|
||||
return net.JoinHostPort(host, port)
|
||||
}
|
||||
|
||||
var retryBackoffHook func(time.Duration) *time.Timer
|
||||
|
||||
func backoffNewTimer(d time.Duration) *time.Timer {
|
||||
if retryBackoffHook != nil {
|
||||
return retryBackoffHook(d)
|
||||
}
|
||||
return time.NewTimer(d)
|
||||
}
|
||||
|
||||
// RoundTripOpt is like RoundTrip, but takes options.
|
||||
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
|
||||
if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
|
||||
@ -526,11 +536,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
|
||||
}
|
||||
backoff := float64(uint(1) << (uint(retry) - 1))
|
||||
backoff += backoff * (0.1 * mathrand.Float64())
|
||||
d := time.Second * time.Duration(backoff)
|
||||
timer := backoffNewTimer(d)
|
||||
select {
|
||||
case <-time.After(time.Second * time.Duration(backoff)):
|
||||
case <-timer.C:
|
||||
t.vlogf("RoundTrip retrying after failure: %v", err)
|
||||
continue
|
||||
case <-req.Context().Done():
|
||||
timer.Stop()
|
||||
err = req.Context().Err()
|
||||
}
|
||||
}
|
||||
@ -1075,7 +1088,7 @@ var errRequestCanceled = errors.New("net/http: request canceled")
|
||||
func commaSeparatedTrailers(req *http.Request) (string, error) {
|
||||
keys := make([]string, 0, len(req.Trailer))
|
||||
for k := range req.Trailer {
|
||||
k = http.CanonicalHeaderKey(k)
|
||||
k = canonicalHeader(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
@ -1612,7 +1625,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {

var sawEOF bool
for !sawEOF {
n, err := body.Read(buf[:len(buf)])
n, err := body.Read(buf)
if hasContentLen {
remainLen -= int64(n)
if remainLen == 0 && err == nil {
@ -1915,7 +1928,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail

// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := asciiToLower(name)
name, ascii := lowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@ -1968,7 +1981,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}

for k, vv := range trailer {
lowKey, ascii := asciiToLower(k)
lowKey, ascii := lowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@ -2301,7 +2314,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
key := http.CanonicalHeaderKey(hf.Name)
key := canonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@ -2309,7 +2322,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
t[http.CanonicalHeaderKey(v)] = nil
t[canonicalHeader(v)] = nil
})
} else {
vv := header[key]
@ -2414,7 +2427,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr

trailer := make(http.Header)
for _, hf := range f.RegularFields() {
key := http.CanonicalHeaderKey(hf.Name)
key := canonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@ -2985,7 +2998,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) {
}

func (gz *gzipReader) Close() error {
return gz.body.Close()
if err := gz.body.Close(); err != nil {
return err
}
gz.zerr = fs.ErrClosed
return nil
}

type errorReader struct{ err error }
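
The reworked Close now records fs.ErrClosed in gz.zerr after closing the underlying body, presumably so a later Read on the gzip stream fails cleanly instead of touching a closed body. A self-contained sketch of that sticky-error pattern (closeOnceReader is a made-up type, not the vendored one):

    package main

    import (
        "errors"
        "fmt"
        "io"
        "io/fs"
        "strings"
    )

    // closeOnceReader remembers a sticky error on Close so every later
    // Read fails with fs.ErrClosed rather than using the closed body.
    type closeOnceReader struct {
        body io.ReadCloser
        zerr error
    }

    func (r *closeOnceReader) Read(p []byte) (int, error) {
        if r.zerr != nil {
            return 0, r.zerr
        }
        return r.body.Read(p)
    }

    func (r *closeOnceReader) Close() error {
        if err := r.body.Close(); err != nil {
            return err
        }
        r.zerr = fs.ErrClosed
        return nil
    }

    func main() {
        r := &closeOnceReader{body: io.NopCloser(strings.NewReader("payload"))}
        _ = r.Close()
        _, err := r.Read(make([]byte, 1))
        fmt.Println(errors.Is(err, fs.ErrClosed)) // true
    }
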
14
vendor/modules.txt
vendored
@ -118,7 +118,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.50.2-0.20221109162103-1e40f47dd90b
# github.com/containers/common v0.50.2-0.20221111184705-791b83e1cdf1
## explicit; go 1.17
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@ -172,7 +172,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.23.1-0.20221101011818-2f770d6d5a0c
# github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
## explicit; go 1.17
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -264,7 +264,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.43.1-0.20221104122514-74e37433a2a0
# github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
## explicit; go 1.17
github.com/containers/storage
github.com/containers/storage/drivers
@ -587,7 +587,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
# github.com/onsi/gomega v1.24.0
# github.com/onsi/gomega v1.24.1
## explicit; go 1.18
github.com/onsi/gomega
github.com/onsi/gomega/format
@ -706,7 +706,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.8.1
# github.com/sylabs/sif/v2 v2.8.3
## explicit; go 1.18
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@ -775,7 +775,7 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# golang.org/x/crypto v0.1.0
# golang.org/x/crypto v0.2.0
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@ -804,7 +804,7 @@ golang.org/x/crypto/ssh/knownhosts
# golang.org/x/mod v0.6.0
## explicit; go 1.17
golang.org/x/mod/semver
# golang.org/x/net v0.1.0
# golang.org/x/net v0.2.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/html