Vendor in latest c/common
Signed-off-by: Ashley Cui <acui@redhat.com>
go.mod (14 lines changed)
@@ -10,12 +10,12 @@ require (
 github.com/checkpoint-restore/go-criu/v6 v6.3.0
 github.com/container-orchestrated-devices/container-device-interface v0.5.3
 github.com/containernetworking/cni v1.1.2
-github.com/containernetworking/plugins v1.1.1
+github.com/containernetworking/plugins v1.2.0
 github.com/containers/buildah v1.28.1-0.20221221082547-8403b6ebc13d
-github.com/containers/common v0.50.2-0.20230113010242-57f81310d68a
+github.com/containers/common v0.50.2-0.20230117154327-37d31888e634
 github.com/containers/conmon v2.0.20+incompatible
-github.com/containers/image/v5 v5.23.1-0.20230104183125-aaf0985b36f9
-github.com/containers/ocicrypt v1.1.6
+github.com/containers/image/v5 v5.23.1-0.20230116122250-3d22f4e96c53
+github.com/containers/ocicrypt v1.1.7-0.20230115130455-e0cec6f7be0d
 github.com/containers/psgo v1.8.0
 github.com/containers/storage v1.45.0
 github.com/coreos/go-systemd/v22 v22.5.0
@@ -58,7 +58,7 @@ require (
 github.com/uber/jaeger-client-go v2.30.0+incompatible
 github.com/ulikunitz/xz v0.5.11
 github.com/vbauerster/mpb/v7 v7.5.3
-github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
+github.com/vishvananda/netlink v1.2.1-beta.2
 go.etcd.io/bbolt v1.3.6
 golang.org/x/net v0.5.0
 golang.org/x/sync v0.1.0
@@ -117,7 +117,7 @@ require (
 github.com/pkg/sftp v1.13.5 // indirect
 github.com/pmezard/go-difflib v1.0.0 // indirect
 github.com/proglottis/gpgme v0.1.3 // indirect
-github.com/rivo/uniseg v0.2.0 // indirect
+github.com/rivo/uniseg v0.4.3 // indirect
 github.com/seccomp/libseccomp-golang v0.10.0 // indirect
 github.com/sigstore/sigstore v1.5.0 // indirect
 github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
@@ -135,7 +135,7 @@
 golang.org/x/crypto v0.5.0 // indirect
 golang.org/x/mod v0.7.0 // indirect
 golang.org/x/tools v0.4.0 // indirect
-google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
+google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect
 google.golang.org/grpc v1.51.0 // indirect
 gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
go.sum (29 lines changed)
@@ -260,23 +260,23 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
-github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
+github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
+github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
 github.com/containers/buildah v1.28.1-0.20221221082547-8403b6ebc13d h1:OyqOrN7QTtA7g5ZgQkV5lChAn5cVQB0dnVqjNd93DuQ=
 github.com/containers/buildah v1.28.1-0.20221221082547-8403b6ebc13d/go.mod h1:PAftqTiRApPwzIaY42fDm/FRqOuLgd+ZZtVzIu3/eco=
-github.com/containers/common v0.50.2-0.20230113010242-57f81310d68a h1:lb8F/PZltX0YJMLw7dNPQBv5+e4I8u3id2/3bdv4O6Q=
-github.com/containers/common v0.50.2-0.20230113010242-57f81310d68a/go.mod h1:5MiXTCF9sxo5FUk5rK66LDqHAoNwv8Ym5It3RYxFdTw=
+github.com/containers/common v0.50.2-0.20230117154327-37d31888e634 h1:9DJigXQ0vvwqmnQd/MknvBRv5xRkM31PxcNQfoqgh58=
+github.com/containers/common v0.50.2-0.20230117154327-37d31888e634/go.mod h1:ibwXFof4GdMlP4ndvEeLAImvYlZ7cGt8Bm7bJRaPvWE=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.23.1-0.20230104183125-aaf0985b36f9 h1:iLiH5/Tt8uFf+pmbxAc5WtLnjfUeLDlWWDy8VN5Fjm4=
-github.com/containers/image/v5 v5.23.1-0.20230104183125-aaf0985b36f9/go.mod h1:fQ5+3n5S2aEH6RcIb90OuuAHvk6w+CeZXhNn7Xrwlak=
+github.com/containers/image/v5 v5.23.1-0.20230116122250-3d22f4e96c53 h1:xXPmSOWBg/4df+XubFTCDDLwRhsJcuEs5wJbND6kNMI=
+github.com/containers/image/v5 v5.23.1-0.20230116122250-3d22f4e96c53/go.mod h1:7PVuTsEPUHPKTr1QYjASPW6LumumM4/oCJ5Y+hM4QKE=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI=
-github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
+github.com/containers/ocicrypt v1.1.7-0.20230115130455-e0cec6f7be0d h1:9PyNGtThqLWgN0JnczaWEPEYfrLhITsSzwljJNurfwE=
+github.com/containers/ocicrypt v1.1.7-0.20230115130455-e0cec6f7be0d/go.mod h1:k6j0C2yEGkwl+mOhD1pJ54H7RaDRkLKmucMSZfVrIzA=
 github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
@@ -829,13 +829,14 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
 github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M=
 github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -945,8 +946,8 @@ github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4 h1:fB26rIBlWTVJyEB6ONHdoEvUbvwoudH0/cMEXHiD1RU=
-github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
+github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
 github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -1403,8 +1404,8 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
 google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc=
-google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
+google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
+google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go (3 lines changed; generated, vendored)
@@ -327,6 +327,9 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
     }
     cniPathName := ""
     if writeToDisk {
+        if err := os.MkdirAll(n.cniConfigDir, 0o755); err != nil {
+            return nil, "", err
+        }
         cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist")
         err = os.WriteFile(cniPathName, b, 0o644)
         if err != nil {
vendor/github.com/containers/common/libnetwork/cni/network.go (8 lines changed; generated, vendored)
@@ -141,11 +141,15 @@ func (n *cniNetwork) DefaultNetworkName() string {
 
 func (n *cniNetwork) loadNetworks() error {
     // check the mod time of the config dir
+    var modTime time.Time
     f, err := os.Stat(n.cniConfigDir)
-    if err != nil {
+    // ignore error if the file does not exists
+    if err != nil && !errors.Is(err, os.ErrNotExist) {
         return err
     }
-    modTime := f.ModTime()
+    if err == nil {
+        modTime = f.ModTime()
+    }
 
     // skip loading networks if they are already loaded and
     // if the config dir was not modified since the last call
vendor/github.com/containers/image/v5/copy/copy.go (60 lines changed; generated, vendored)
@@ -24,6 +24,7 @@ import (
     "github.com/containers/image/v5/pkg/compression"
     compressiontypes "github.com/containers/image/v5/pkg/compression/types"
     "github.com/containers/image/v5/signature"
+    "github.com/containers/image/v5/signature/signer"
     "github.com/containers/image/v5/transports"
     "github.com/containers/image/v5/types"
     encconfig "github.com/containers/ocicrypt/config"
@@ -61,6 +62,7 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
 
 // copier allows us to keep track of diffID values for blobs, and other
 // data shared across one or more images in a possible manifest list.
+// The owner must call close() when done.
 type copier struct {
     dest private.ImageDestination
     rawSource private.ImageSource
@@ -75,6 +77,8 @@
     ociEncryptConfig *encconfig.EncryptConfig
     concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
     downloadForeignLayers bool
+    signers []*signer.Signer // Signers to use to create new signatures for the image
+    signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
 }
 
 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -121,12 +125,16 @@ type ImageListSelection int
 
 // Options allows supplying non-default configuration modifying the behavior of CopyImage.
 type Options struct {
-    RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
+    RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature.
+    // Signers to use to add signatures during the copy.
+    // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row.
+    Signers []*signer.Signer
     SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
-    SignPassphrase string // Passphare to use when signing with the key ID from `SignBy`.
+    SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
     SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
-    SignSigstorePrivateKeyPassphrase []byte // Passphare to use when signing with `SignBySigstorePrivateKeyFile`.
+    SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
     SignIdentity reference.Named // Identify to use when signing, defaults to the docker reference of the destination
+
     ReportWriter io.Writer
     SourceCtx *types.SystemContext
     DestinationCtx *types.SystemContext
@@ -257,6 +265,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         ociEncryptConfig: options.OciEncryptConfig,
         downloadForeignLayers: options.DownloadForeignLayers,
     }
+    defer c.close()
 
     // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
     if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
@@ -284,6 +293,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         c.compressionLevel = options.DestinationCtx.CompressionLevel
     }
 
+    if err := c.setupSigners(options); err != nil {
+        return nil, err
+    }
+
     unparsedToplevel := image.UnparsedInstance(rawSource, nil)
     multiImage, err := isMultiImage(ctx, unparsedToplevel)
     if err != nil {
@@ -340,6 +353,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
     return copiedManifest, nil
 }
 
+// close tears down state owned by copier.
+func (c *copier) close() {
+    for i, s := range c.signersToClose {
+        if err := s.Close(); err != nil {
+            logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err)
+        }
+    }
+}
+
 // Checks if the destination supports accepting multiple images by checking if it can support
 // manifest types that are lists of other manifests.
 func supportsMultipleImages(dest types.ImageDestination) bool {
@@ -564,20 +586,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
     }
 
     // Sign the manifest list.
-    if options.SignBy != "" {
-        newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity)
+    newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
     if err != nil {
         return nil, err
     }
-        sigs = append(sigs, newSig)
-    }
-    if options.SignBySigstorePrivateKeyFile != "" {
-        newSig, err := c.createSigstoreSignature(manifestList, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
-        if err != nil {
-            return nil, err
-        }
-        sigs = append(sigs, newSig)
-    }
+    sigs = append(sigs, newSigs...)
 
     c.Printf("Storing list signatures\n")
     if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
@@ -675,12 +688,12 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
     // Decide whether we can substitute blobs with semantic equivalents:
     // - Don’t do that if we can’t modify the manifest at all
     // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
-    // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+    // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
     // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
     // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
     // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
     // and we would reuse and sign it.
-    ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" && options.SignBySigstorePrivateKeyFile == ""
+    ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
 
     if err := ic.updateEmbeddedDockerReference(); err != nil {
         return nil, "", "", err
@@ -711,7 +724,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 
     // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
     if options.OptimizeDestinationImageAlreadyExists {
-        shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" || options.SignBySigstorePrivateKeyFile != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+        shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
         noPendingManifestUpdates := ic.noPendingManifestUpdates()
 
         logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
@@ -791,20 +804,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
         targetInstance = &retManifestDigest
     }
 
-    if options.SignBy != "" {
-        newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity)
+    newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
     if err != nil {
         return nil, "", "", err
     }
-        sigs = append(sigs, newSig)
-    }
-    if options.SignBySigstorePrivateKeyFile != "" {
-        newSig, err := c.createSigstoreSignature(manifestBytes, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
-        if err != nil {
-            return nil, "", "", err
-        }
-        sigs = append(sigs, newSig)
-    }
+    sigs = append(sigs, newSigs...)
 
     c.Printf("Storing signatures\n")
     if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
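The new Options.Signers field is the public entry point for the signer plumbing added above. A hedged sketch of how a caller might wire it into copy.Image; the policy context and image references are assumed to be prepared elsewhere, and the key fingerprint is a placeholder:

package example

import (
    "context"
    "os"

    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/signature/signer"
    "github.com/containers/image/v5/signature/simplesigning"
    "github.com/containers/image/v5/types"
)

// copyAndSign copies srcRef to destRef, signing with a caller-constructed signer.
// policyContext, srcRef and destRef are assumed to be set up by the caller.
func copyAndSign(ctx context.Context, policyContext *signature.PolicyContext,
    srcRef, destRef types.ImageReference, keyFingerprint string) error {
    s, err := simplesigning.NewSigner(simplesigning.WithKeyFingerprint(keyFingerprint))
    if err != nil {
        return err
    }
    // Per the Options.Signers comment above, copy.Image does not close supplied
    // signers; the caller keeps ownership and may reuse them across copies.
    defer s.Close()

    _, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
        Signers:      []*signer.Signer{s},
        ReportWriter: os.Stdout,
    })
    return err
}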
vendor/github.com/containers/image/v5/copy/sign.go (98 lines changed; generated, vendored)
@@ -7,11 +7,49 @@ import (
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/internal/private"
     internalsig "github.com/containers/image/v5/internal/signature"
-    "github.com/containers/image/v5/signature"
+    internalSigner "github.com/containers/image/v5/internal/signer"
     "github.com/containers/image/v5/signature/sigstore"
+    "github.com/containers/image/v5/signature/simplesigning"
     "github.com/containers/image/v5/transports"
 )
 
+// setupSigners initializes c.signers based on options.
+func (c *copier) setupSigners(options *Options) error {
+    c.signers = append(c.signers, options.Signers...)
+    // c.signersToClose is intentionally not updated with options.Signers.
+
+    // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need
+    // to clean up any created signers on failure.
+
+    if options.SignBy != "" {
+        opts := []simplesigning.Option{
+            simplesigning.WithKeyFingerprint(options.SignBy),
+        }
+        if options.SignPassphrase != "" {
+            opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase))
+        }
+        signer, err := simplesigning.NewSigner(opts...)
+        if err != nil {
+            return err
+        }
+        c.signers = append(c.signers, signer)
+        c.signersToClose = append(c.signersToClose, signer)
+    }
+
+    if options.SignBySigstorePrivateKeyFile != "" {
+        signer, err := sigstore.NewSigner(
+            sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase),
+        )
+        if err != nil {
+            return err
+        }
+        c.signers = append(c.signers, signer)
+        c.signersToClose = append(c.signersToClose, signer)
+    }
+
+    return nil
+}
+
 // sourceSignatures returns signatures from unparsedSource based on options,
 // and verifies that they can be used (to avoid copying a large image when we
 // can tell in advance that it would ultimately fail)
@@ -37,38 +75,13 @@ func (c *copier) sourceSignatures(ctx context.Context, unparsed private.Unparsed
     return sigs, nil
 }
 
-// createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) {
-    mech, err := signature.NewGPGSigningMechanism()
-    if err != nil {
-        return nil, fmt.Errorf("initializing GPG: %w", err)
-    }
-    defer mech.Close()
-    if err := mech.SupportsSigning(); err != nil {
-        return nil, fmt.Errorf("Signing not supported: %w", err)
+// createSignatures creates signatures for manifest and an optional identity.
+func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) {
+    if len(c.signers) == 0 {
+        // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible.
+        return nil, nil
     }
 
     if identity != nil {
         if reference.IsNameOnly(identity) {
             return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity)
         }
     } else {
         identity = c.dest.Reference().DockerReference()
         if identity == nil {
             return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
         }
     }
 
-    c.Printf("Signing manifest using simple signing\n")
-    newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
-    if err != nil {
-        return nil, fmt.Errorf("creating signature: %w", err)
-    }
-    return internalsig.SimpleSigningFromBlob(newSig), nil
-}
-
-// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity.
-func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) {
-    if identity != nil {
-        if reference.IsNameOnly(identity) {
-            return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
@@ -80,10 +93,23 @@ func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string,
         }
     }
 
-    c.Printf("Signing manifest using a sigstore signature\n")
-    newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase)
-    if err != nil {
-        return nil, fmt.Errorf("creating signature: %w", err)
+    res := make([]internalsig.Signature, 0, len(c.signers))
+    for signerIndex, signer := range c.signers {
+        msg := internalSigner.ProgressMessage(signer)
+        if len(c.signers) == 1 {
+            c.Printf("Creating signature: %s\n", msg)
+        } else {
+            c.Printf("Creating signature %d: %s\n", signerIndex+1, msg)
+        }
-    return newSig, nil
+        newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity)
+        if err != nil {
+            if len(c.signers) == 1 {
+                return nil, fmt.Errorf("creating signature: %w", err)
+            } else {
+                return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
+            }
+        }
+        res = append(res, newSig)
+    }
+    return res, nil
 }
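setupSigners above maps the legacy SignBy/SignPassphrase strings onto the same simple-signing signer that a caller could now build directly. A sketch of that equivalent construction (fingerprint and passphrase are placeholder arguments):

package example

import (
    "fmt"

    "github.com/containers/image/v5/signature/signer"
    "github.com/containers/image/v5/signature/simplesigning"
)

// legacySignByEquivalent builds the same signer that setupSigners creates for
// Options.SignBy / Options.SignPassphrase.
func legacySignByEquivalent(fingerprint, passphrase string) (*signer.Signer, error) {
    opts := []simplesigning.Option{simplesigning.WithKeyFingerprint(fingerprint)}
    if passphrase != "" {
        opts = append(opts, simplesigning.WithPassphrase(passphrase))
    }
    s, err := simplesigning.NewSigner(opts...)
    if err != nil {
        return nil, fmt.Errorf("creating simple-signing signer: %w", err)
    }
    return s, nil
}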
vendor/github.com/containers/image/v5/docker/docker_client.go (6 lines changed; generated, vendored)
@@ -19,12 +19,12 @@ import (
 
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/internal/iolimits"
+    "github.com/containers/image/v5/internal/useragent"
     "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/pkg/docker/config"
     "github.com/containers/image/v5/pkg/sysregistriesv2"
     "github.com/containers/image/v5/pkg/tlsclientconfig"
     "github.com/containers/image/v5/types"
-    "github.com/containers/image/v5/version"
     "github.com/containers/storage/pkg/homedir"
     "github.com/docker/distribution/registry/api/errcode"
     v2 "github.com/docker/distribution/registry/api/v2"
@@ -68,8 +68,6 @@
         {path: etcDir + "/containers/certs.d", absolute: true},
         {path: etcDir + "/docker/certs.d", absolute: true},
     }
-
-    defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
 )
 
 // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -284,7 +282,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
     }
     tlsClientConfig.InsecureSkipVerify = skipVerify
 
-    userAgent := defaultUserAgent
+    userAgent := useragent.DefaultUserAgent
     if sys != nil && sys.DockerRegistryUserAgent != "" {
        userAgent = sys.DockerRegistryUserAgent
     }
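With the default User-Agent moved into internal/useragent, per-registry overrides keep working the same way: a non-empty SystemContext.DockerRegistryUserAgent still wins, as the hunk above shows. A small sketch (the UA string is a placeholder):

package example

import (
    "github.com/containers/image/v5/types"
)

// newSystemContextWithUA overrides the built-in
// "containers/<version> (github.com/containers/image)" default User-Agent.
func newSystemContextWithUA() *types.SystemContext {
    return &types.SystemContext{
        DockerRegistryUserAgent: "my-tool/1.2.3 (+https://example.com)", // placeholder value
    }
}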
vendor/github.com/containers/image/v5/docker/docker_image_src.go (4 lines changed; generated, vendored)
@@ -10,7 +10,6 @@ import (
     "net/http"
     "net/url"
     "os"
-    "regexp"
     "strings"
     "sync"
 
@@ -24,6 +23,7 @@
     "github.com/containers/image/v5/pkg/blobinfocache/none"
     "github.com/containers/image/v5/pkg/sysregistriesv2"
     "github.com/containers/image/v5/types"
+    "github.com/containers/storage/pkg/regexp"
     digest "github.com/opencontainers/go-digest"
     "github.com/sirupsen/logrus"
 )
@@ -303,7 +303,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
         }
     }
 
-var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
+var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
 
 func parseMediaType(contentType string) (string, map[string]string, error) {
     mediaType, params, err := mime.ParseMediaType(contentType)
vendor/github.com/containers/image/v5/docker/reference/regexp.go (15 lines changed; generated, vendored)
@@ -1,6 +1,7 @@
 package reference
 
 import (
+    storageRegexp "github.com/containers/storage/pkg/regexp"
     "regexp"
     "strings"
 )
@@ -60,7 +61,7 @@ var (
     anchoredTag = anchored(tag)
     // anchoredTagRegexp matches valid tag names, anchored at the start and
     // end of the matched string.
-    anchoredTagRegexp = re(anchoredTag)
+    anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
 
     // DigestRegexp matches valid digests.
     DigestRegexp = re(digestPat)
@@ -68,7 +69,7 @@ var (
     anchoredDigest = anchored(digestPat)
     // anchoredDigestRegexp matches valid digests, anchored at the start and
     // end of the matched string.
-    anchoredDigestRegexp = re(anchoredDigest)
+    anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
 
     namePat = expression(
         optional(domain, literal(`/`)),
@@ -85,7 +86,7 @@ var (
         optional(repeated(literal(`/`), nameComponent))))
     // anchoredNameRegexp is used to parse a name value, capturing the
     // domain and trailing components.
-    anchoredNameRegexp = re(anchoredName)
+    anchoredNameRegexp = storageRegexp.Delayed(anchoredName)
 
     referencePat = anchored(capture(namePat),
         optional(literal(":"), capture(tag)),
@@ -108,13 +109,7 @@ var (
     anchoredIdentifier = anchored(identifier)
     // anchoredIdentifierRegexp is used to check or match an
     // identifier value, anchored at start and end of string.
-    anchoredIdentifierRegexp = re(anchoredIdentifier)
-
-    anchoredShortIdentifier = anchored(shortIdentifier)
-    // anchoredShortIdentifierRegexp is used to check if a value
-    // is a possible identifier prefix, anchored at start and end
-    // of string.
-    anchoredShortIdentifierRegexp = re(anchoredShortIdentifier)
+    anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier)
 )
 
 // re compiles the string to a regular expression.
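These regexp hunks (here and in the files below) swap eagerly compiled package-level patterns for the lazily compiled wrapper from c/storage, so importing a package full of patterns no longer pays the compilation cost at start-up. A minimal sketch of the pattern, assuming only the pkg/regexp API used in this diff (Delayed plus MatchString):

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/regexp"
)

// Delayed defers building the regexp until it is first used, then reuses it.
var validHex = regexp.Delayed(`^([a-f0-9]{64})$`)

func main() {
    fmt.Println(validHex.MatchString("0123")) // false: not 64 hex characters
}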
vendor/github.com/containers/image/v5/internal/signature/sigstore.go (6 lines changed; generated, vendored)
@@ -7,6 +7,12 @@ const (
     SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
     // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
     SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+    // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
+    SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
+    // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
+    SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
+    // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
+    SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
 )
 
 // Sigstore is a github.com/cosign/cosign signature.
vendor/github.com/containers/image/v5/internal/signer/signer.go (new file, 47 lines; generated, vendored)
@@ -0,0 +1,47 @@
package signer

import (
    "context"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/signature"
)

// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
// This type is visible to external callers, so it has no public fields or methods apart from Close().
//
// The owner of a Signer must call Close() when done.
type Signer struct {
    implementation SignerImplementation
}

// NewSigner creates a public Signer from a SignerImplementation
func NewSigner(impl SignerImplementation) *Signer {
    return &Signer{implementation: impl}
}

func (s *Signer) Close() error {
    return s.implementation.Close()
}

// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
// Alternatively, should SignImageManifest be provided a logging writer of some kind?
func ProgressMessage(signer *Signer) string {
    return signer.implementation.ProgressMessage()
}

// SignImageManifest invokes a SignerImplementation.
// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage.
func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) {
    return signer.implementation.SignImageManifest(ctx, manifest, dockerReference)
}

// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
// This interface is distinct from Signer so that implementations can be created outside of this package.
type SignerImplementation interface {
    // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
    ProgressMessage() string
    // SignImageManifest creates a new signature for manifest m as dockerReference.
    SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error)
    Close() error
}
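signer.Signer is an opaque handle around the SignerImplementation interface above. A purely illustrative sketch of that contract follows; because the package is internal to c/image, this snippet only builds inside the c/image module itself, and the real implementations are the simplesigning and sigstore packages later in this diff:

package signerdemo

import (
    "context"
    "errors"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/signature"
)

// nopSigner shows the SignerImplementation method set; it cannot actually sign.
type nopSigner struct{}

func (nopSigner) ProgressMessage() string { return "Creating no-op signature" }

func (nopSigner) SignImageManifest(ctx context.Context, m []byte, ref reference.Named) (signature.Signature, error) {
    return nil, errors.New("nop signer cannot actually sign")
}

func (nopSigner) Close() error { return nil }

// Wrapping it with signer.NewSigner(nopSigner{}) would produce the opaque
// handle that copy.Options.Signers accepts.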
vendor/github.com/containers/image/v5/internal/useragent/useragent.go (new file, 6 lines; generated, vendored)
@@ -0,0 +1,6 @@
package useragent

import "github.com/containers/image/v5/version"

// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise.
var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
vendor/github.com/containers/image/v5/manifest/docker_schema1.go (4 lines changed; generated, vendored)
@@ -4,12 +4,12 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "regexp"
     "strings"
     "time"
 
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/types"
+    "github.com/containers/storage/pkg/regexp"
     "github.com/docker/docker/api/types/versions"
     "github.com/opencontainers/go-digest"
 )
@@ -206,7 +206,7 @@ func (m *Schema1) fixManifestLayers() error {
     return nil
 }
 
-var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+var validHex = regexp.Delayed(`^([a-f0-9]{64})$`)
 
 func validateV1ID(id string) error {
     if ok := validHex.MatchString(id); !ok {
vendor/github.com/containers/image/v5/openshift/openshift_transport.go (4 lines changed; generated, vendored)
@@ -4,7 +4,6 @@ import (
     "context"
     "errors"
     "fmt"
-    "regexp"
     "strings"
 
     "github.com/containers/image/v5/docker/policyconfiguration"
@@ -12,6 +11,7 @@
     genericImage "github.com/containers/image/v5/internal/image"
     "github.com/containers/image/v5/transports"
     "github.com/containers/image/v5/types"
+    "github.com/containers/storage/pkg/regexp"
 )
 
 func init() {
@@ -35,7 +35,7 @@ func (t openshiftTransport) ParseReference(reference string) (types.ImageReferen
 // Note that imageNameRegexp is namespace/stream:tag, this
 // is HOSTNAME/namespace/stream:tag or parent prefixes.
 // Keep this in sync with imageNameRegexp!
-var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
+var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
 
 // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
 // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
vendor/github.com/containers/image/v5/ostree/ostree_transport.go (4 lines changed; generated, vendored)
@@ -10,7 +10,6 @@ import (
     "fmt"
     "os"
     "path/filepath"
-    "regexp"
     "strings"
 
     "github.com/containers/image/v5/directory/explicitfilepath"
@@ -18,6 +17,7 @@
     "github.com/containers/image/v5/internal/image"
     "github.com/containers/image/v5/transports"
     "github.com/containers/image/v5/types"
+    "github.com/containers/storage/pkg/regexp"
 )
 
 const defaultOSTreeRepo = "/ostree/repo"
@@ -216,7 +216,7 @@ func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemCon
     return errors.New("Deleting images not implemented for ostree: images")
 }
 
-var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
 
 func encodeOStreeRef(in string) string {
     var buffer bytes.Buffer
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (4 lines changed; generated, vendored)
@@ -6,7 +6,6 @@ import (
     "os"
     "path/filepath"
     "reflect"
-    "regexp"
     "sort"
     "strings"
     "sync"
@@ -15,6 +14,7 @@
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/types"
     "github.com/containers/storage/pkg/homedir"
+    "github.com/containers/storage/pkg/regexp"
     "github.com/sirupsen/logrus"
 )
 
@@ -384,7 +384,7 @@ func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
 }
 
 // anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
-var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
 
 // postProcess checks the consistency of all the configuration, looks for conflicts,
 // and normalizes the configuration (e.g., sets the Prefix to Location if not set).
vendor/github.com/containers/image/v5/signature/internal/rekor_set.go (new file, 98 lines; generated, vendored)
@@ -0,0 +1,98 @@
package internal

import (
    "encoding/json"
)

// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
const HashedRekordV001APIVersion = "0.0.1"

// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET
// (note that this a signature-specific format, not a format directly used by the Rekor API).
// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
type UntrustedRekorSET struct {
    UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
    UntrustedPayload json.RawMessage
}

type UntrustedRekorPayload struct {
    Body []byte // In cosign, this is an interface{}, but only a string works
    IntegratedTime int64
    LogIndex int64
    LogID string
}

// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
    err := s.strictUnmarshalJSON(data)
    if err != nil {
        if formatErr, ok := err.(JSONFormatError); ok {
            err = NewInvalidSignatureError(formatErr.Error())
        }
    }
    return err
}

// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
    return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
        "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
        "Payload": &s.UntrustedPayload,
    })
}

// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler
var _ json.Marshaler = UntrustedRekorSET{}
var _ json.Marshaler = (*UntrustedRekorSET)(nil)

// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
    return json.Marshal(map[string]interface{}{
        "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
        "Payload": s.UntrustedPayload,
    })
}

// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface
func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
    err := p.strictUnmarshalJSON(data)
    if err != nil {
        if formatErr, ok := err.(JSONFormatError); ok {
            err = NewInvalidSignatureError(formatErr.Error())
        }
    }
    return err
}

// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
    return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
        "body": &p.Body,
        "integratedTime": &p.IntegratedTime,
        "logIndex": &p.LogIndex,
        "logID": &p.LogID,
    })
}

// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implements json.Marshaler
var _ json.Marshaler = UntrustedRekorPayload{}
var _ json.Marshaler = (*UntrustedRekorPayload)(nil)

// MarshalJSON implements the json.Marshaler interface.
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
    return json.Marshal(map[string]interface{}{
        "body": p.Body,
        "integratedTime": p.IntegratedTime,
        "logIndex": p.LogIndex,
        "logID": p.LogID,
    })
}
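The strict decoders above accept exactly the four lower-camel-case keys of the Rekor payload. A small illustration of the resulting wire shape, using a local stand-in struct rather than the internal type (all values are made up):

package main

import (
    "encoding/json"
    "fmt"
)

// rekorPayloadShape mirrors the field names the strict decoder above expects;
// it is a stand-in for illustration, not the internal UntrustedRekorPayload type.
type rekorPayloadShape struct {
    Body           []byte `json:"body"`
    IntegratedTime int64  `json:"integratedTime"`
    LogIndex       int64  `json:"logIndex"`
    LogID          string `json:"logID"`
}

func main() {
    b, _ := json.Marshal(rekorPayloadShape{
        Body:           []byte(`{"apiVersion":"0.0.1"}`), // encoding/json renders []byte as a base64 string,
        IntegratedTime: 1673875200,                       // matching the "only a string works" comment above
        LogIndex:       42,
        LogID:          "c0ffee", // placeholder
    })
    fmt.Println(string(b))
}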
vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go (3 lines changed; generated, vendored)
@@ -46,7 +46,8 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
     }
 }
 
-// Compile-time check that UntrustedSigstorePayload implements json.Marshaler
+// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implements json.Marshaler
 var _ json.Marshaler = UntrustedSigstorePayload{}
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
 
 // MarshalJSON implements the json.Marshaler interface.
vendor/github.com/containers/image/v5/signature/policy_config.go (8 lines changed; generated, vendored)
@@ -19,13 +19,13 @@ import (
     "fmt"
     "os"
     "path/filepath"
-    "regexp"
 
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/signature/internal"
     "github.com/containers/image/v5/transports"
     "github.com/containers/image/v5/types"
     "github.com/containers/storage/pkg/homedir"
+    "github.com/containers/storage/pkg/regexp"
 )
 
 // systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -829,12 +829,12 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
 // Private objects for validateIdentityRemappingPrefix
 var (
     // remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
-    remapIdentityDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+    remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
     // remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
     // we need this because reference.NameRegexp accepts short names with docker.io implied.
-    remapIdentityDomainPrefixRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "/")
+    remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
     // remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
-    remapIdentityNameRegexp = regexp.MustCompile("^" + reference.NameRegexp.String() + "$")
+    remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
 )
 
 // validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
vendor/github.com/containers/image/v5/signature/signer/signer.go (new file, 9 lines; generated, vendored)
@@ -0,0 +1,9 @@
package signer

import "github.com/containers/image/v5/internal/signer"

// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
// It can only be created from within the containers/image package; it can’t be implemented externally.
//
// The owner of a Signer must call Close() when done.
type Signer = signer.Signer
vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go (new file, 95 lines; generated, vendored)
@@ -0,0 +1,95 @@
package internal

import (
    "bytes"
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/signature"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/signature/internal"
    sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
)

type Option func(*SigstoreSigner) error

// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures.
// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the
// dependency impact.
type SigstoreSigner struct {
    PrivateKey sigstoreSignature.Signer // May be nil during initialization
    SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey

    // Fulcio results to include
    FulcioGeneratedCertificate []byte // Or nil
    FulcioGeneratedCertificateChain []byte // Or nil

    // Rekor state
    RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil
}

// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
func (s *SigstoreSigner) ProgressMessage() string {
    return "Signing image using a sigstore signature"
}

// SignImageManifest creates a new signature for manifest m as dockerReference.
func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
    if s.PrivateKey == nil {
        return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner")
    }

    if reference.IsNameOnly(dockerReference) {
        return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
    }
    manifestDigest, err := manifest.Digest(m)
    if err != nil {
        return nil, err
    }
    // sigstore/cosign completely ignores dockerReference for actual policy decisions.
    // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
    // So, just do what simple signing does, and cosign won’t mind.
    payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
    payloadBytes, err := json.Marshal(payloadData)
    if err != nil {
        return nil, err
    }

    // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
    // which seems to be not used by anything. So we don’t bother.
    signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes))
    if err != nil {
        return nil, fmt.Errorf("creating signature: %w", err)
    }
    base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
    var rekorSETBytes []byte // = nil
    if s.RekorUploader != nil {
        set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes)
        if err != nil {
            return nil, err
        }
        rekorSETBytes = set
    }

    annotations := map[string]string{
        signature.SigstoreSignatureAnnotationKey: base64Signature,
    }
    if s.FulcioGeneratedCertificate != nil {
        annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate)
    }
    if s.FulcioGeneratedCertificateChain != nil {
        annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain)
    }
    if rekorSETBytes != nil {
        annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes)
    }
    return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil
}

func (s *SigstoreSigner) Close() error {
    return nil
}
vendor/github.com/containers/image/v5/signature/sigstore/sign.go (file deleted, 65 lines; generated, vendored)
@@ -1,65 +0,0 @@
package sigstore

import (
    "bytes"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "os"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/signature"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/signature/internal"
    sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
)

// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference,
// using a private key and an optional passphrase.
//
// Yes, this returns an internal type, and should currently not be used outside of c/image.
// There is NO COMITTMENT TO STABLE API.
func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) {
    privateKeyPEM, err := os.ReadFile(privateKeyFile)
    if err != nil {
        return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err)
    }
    signer, err := loadPrivateKey(privateKeyPEM, passphrase)
    if err != nil {
        return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err)
    }

    return signDockerManifest(m, dockerReference, signer)
}

func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) {
    if reference.IsNameOnly(dockerReference) {
        return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
    }
    manifestDigest, err := manifest.Digest(m)
    if err != nil {
        return signature.Sigstore{}, err
    }
    // sigstore/cosign completely ignores dockerReference for actual policy decisions.
    // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
    // So, just do what simple signing does, and cosign won’t mind.
    payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
    payloadBytes, err := json.Marshal(payloadData)
    if err != nil {
        return signature.Sigstore{}, err
    }

    // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
    // which seems to be not used by anything. So we don’t bother.
    signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes))
    if err != nil {
        return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err)
    }
    base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)

    return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType,
        payloadBytes,
        map[string]string{
            signature.SigstoreSignatureAnnotationKey: base64Signature,
        }), nil
}
60
vendor/github.com/containers/image/v5/signature/sigstore/signer.go
generated
vendored
Normal file
60
vendor/github.com/containers/image/v5/signature/sigstore/signer.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
package sigstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
internalSigner "github.com/containers/image/v5/internal/signer"
|
||||
"github.com/containers/image/v5/signature/signer"
|
||||
"github.com/containers/image/v5/signature/sigstore/internal"
|
||||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
)
|
||||
|
||||
type Option = internal.Option
|
||||
|
||||
func WithPrivateKeyFile(file string, passphrase []byte) Option {
|
||||
return func(s *internal.SigstoreSigner) error {
|
||||
if s.PrivateKey != nil {
|
||||
return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
if passphrase == nil {
|
||||
return errors.New("private key passphrase not provided")
|
||||
}
|
||||
|
||||
privateKeyPEM, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading private key from %s: %w", file, err)
|
||||
}
|
||||
signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing private key: %w", err)
|
||||
}
|
||||
publicKey, err := signerVerifier.PublicKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting public key from private key: %w", err)
|
||||
}
|
||||
publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting public key to PEM: %w", err)
|
||||
}
|
||||
s.PrivateKey = signerVerifier
|
||||
s.SigningKeyOrCert = publicKeyPEM
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewSigner(opts ...Option) (*signer.Signer, error) {
|
||||
s := internal.SigstoreSigner{}
|
||||
for _, o := range opts {
|
||||
if err := o(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.PrivateKey == nil {
|
||||
return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
return internalSigner.NewSigner(&s), nil
|
||||
}
|
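A hedged usage sketch of the Option/NewSigner API above (not part of the vendored change); the key path and passphrase are hypothetical placeholders, and error handling is abbreviated:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature/sigstore"
)

func main() {
	// WithPrivateKeyFile requires a non-nil passphrase, as enforced above.
	s, err := sigstore.NewSigner(
		sigstore.WithPrivateKeyFile("/path/to/cosign.key", []byte("passphrase")),
	)
	if err != nil {
		fmt.Println("creating sigstore signer:", err)
		return
	}
	defer s.Close() // release resources held by the returned *signer.Signer
	// s would then typically be handed to the image-copy code that attaches signatures.
}
```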
3
vendor/github.com/containers/image/v5/signature/simple.go
generated
vendored
3
vendor/github.com/containers/image/v5/signature/simple.go
generated
vendored
@ -72,7 +72,8 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time check that untrustedSignature implements json.Marshaler
|
||||
// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
|
||||
var _ json.Marshaler = untrustedSignature{}
|
||||
var _ json.Marshaler = (*untrustedSignature)(nil)
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
|
105
vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
generated
vendored
Normal file
105
vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
package simplesigning
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
internalSig "github.com/containers/image/v5/internal/signature"
|
||||
internalSigner "github.com/containers/image/v5/internal/signer"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/containers/image/v5/signature/signer"
|
||||
)
|
||||
|
||||
// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures.
|
||||
type simpleSigner struct {
|
||||
mech signature.SigningMechanism
|
||||
keyFingerprint string
|
||||
passphrase string // "" if not provided.
|
||||
}
|
||||
|
||||
type Option func(*simpleSigner) error
|
||||
|
||||
// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint.
|
||||
func WithKeyFingerprint(keyFingerprint string) Option {
|
||||
return func(s *simpleSigner) error {
|
||||
s.keyFingerprint = keyFingerprint
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key.
|
||||
// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry.
|
||||
func WithPassphrase(passphrase string) Option {
|
||||
return func(s *simpleSigner) error {
|
||||
// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
|
||||
if strings.Contains(passphrase, "\n") {
|
||||
return errors.New("invalid passphrase: must not contain a line break")
|
||||
}
|
||||
s.passphrase = passphrase
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default
|
||||
// GPG configuration ($GNUPGHOME / ~/.gnupg).
|
||||
//
|
||||
// The set of options must identify a key to sign with, typically by using WithKeyFingerprint.
|
||||
//
|
||||
// The caller must call Close() on the returned Signer.
|
||||
func NewSigner(opts ...Option) (*signer.Signer, error) {
|
||||
mech, err := signature.NewGPGSigningMechanism()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing GPG: %w", err)
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
mech.Close()
|
||||
}
|
||||
}()
|
||||
if err := mech.SupportsSigning(); err != nil {
|
||||
return nil, fmt.Errorf("Signing not supported: %w", err)
|
||||
}
|
||||
|
||||
s := simpleSigner{
|
||||
mech: mech,
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.keyFingerprint == "" {
|
||||
return nil, errors.New("no key identity provided for simple signing")
|
||||
}
|
||||
// Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
|
||||
|
||||
succeeded = true
|
||||
return internalSigner.NewSigner(&s), nil
|
||||
}
|
||||
|
||||
// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
|
||||
func (s *simpleSigner) ProgressMessage() string {
|
||||
return "Signing image using simple signing"
|
||||
}
|
||||
|
||||
// SignImageManifest creates a new signature for manifest m as dockerReference.
|
||||
func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
|
||||
if reference.IsNameOnly(dockerReference) {
|
||||
return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
|
||||
}
|
||||
simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
|
||||
Passphrase: s.passphrase,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return internalSig.SimpleSigningFromBlob(simpleSig), nil
|
||||
}
|
||||
|
||||
func (s *simpleSigner) Close() error {
|
||||
return s.mech.Close()
|
||||
}
|
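Similarly, a hedged sketch (not part of the vendored change) of constructing a simple-signing signer with the options above; the fingerprint and passphrase are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature/simplesigning"
)

func main() {
	s, err := simplesigning.NewSigner(
		// Hypothetical 40-character GPG key fingerprint.
		simplesigning.WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"),
		// Optional; omit to let gpg-agent / pinentry prompt interactively.
		simplesigning.WithPassphrase("opensesame"),
	)
	if err != nil {
		fmt.Println("creating simple-signing signer:", err)
		return
	}
	defer s.Close() // required per the NewSigner documentation above
}
```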
1
vendor/github.com/containers/ocicrypt/.gitignore
generated
vendored
Normal file
1
vendor/github.com/containers/ocicrypt/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
*~
|
31
vendor/github.com/containers/ocicrypt/.golangci.yml
generated
vendored
Normal file
31
vendor/github.com/containers/ocicrypt/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
linters:
|
||||
enable:
|
||||
- depguard
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- gofmt
|
||||
- goimports
|
||||
- revive
|
||||
- ineffassign
|
||||
- vet
|
||||
- unused
|
||||
- misspell
|
||||
|
||||
linters-settings:
|
||||
depguard:
|
||||
list-type: denylist
|
||||
include-go-root: true
|
||||
packages:
|
||||
# use "io" or "os" instead
|
||||
# https://go.dev/doc/go1.16#ioutil
|
||||
- io/ioutil
|
||||
|
||||
revive:
|
||||
severity: error
|
||||
rules:
|
||||
- name: indent-error-flow
|
||||
severity: warning
|
||||
disabled: false
|
||||
|
||||
- name: error-strings
|
||||
disabled: false
|
29
vendor/github.com/containers/ocicrypt/.travis.yml
generated
vendored
29
vendor/github.com/containers/ocicrypt/.travis.yml
generated
vendored
@ -1,29 +0,0 @@
|
||||
dist: bionic
|
||||
language: go
|
||||
|
||||
os:
|
||||
- linux
|
||||
|
||||
go:
|
||||
- "1.13.x"
|
||||
- "1.16.x"
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- gnutls-bin
|
||||
- softhsm2
|
||||
|
||||
go_import_path: github.com/containers/ocicrypt
|
||||
|
||||
install:
|
||||
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2
|
||||
|
||||
script:
|
||||
- make
|
||||
- make check
|
||||
- make test
|
2
vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
generated
vendored
2
vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
generated
vendored
@ -138,7 +138,7 @@ func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBloc
|
||||
if typ == "" {
|
||||
return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
|
||||
}
|
||||
if c, ok := h.cipherMap[LayerCipherType(typ)]; ok {
|
||||
if c, ok := h.cipherMap[typ]; ok {
|
||||
return c.Decrypt(encDataReader, opt)
|
||||
}
|
||||
return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ)
|
||||
|
2
vendor/github.com/containers/ocicrypt/config/constructors.go
generated
vendored
2
vendor/github.com/containers/ocicrypt/config/constructors.go
generated
vendored
@ -17,9 +17,9 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/containers/ocicrypt/crypto/pkcs11"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/ocicrypt/crypto/pkcs11"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
6
vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
generated
vendored
6
vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
generated
vendored
@ -18,9 +18,9 @@ package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/pkg/errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Command describes the structure of a command; it consists of path and args, where path defines the location of
|
||||
@ -52,7 +52,7 @@ func parseConfigFile(filename string) (*OcicryptConfig, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
3
vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
generated
vendored
3
vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
generated
vendored
@ -18,7 +18,6 @@ package pkcs11config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
@ -51,7 +50,7 @@ func parseConfigFile(filename string) (*OcicryptConfig, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
5
vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
generated
vendored
5
vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
generated
vendored
@ -15,6 +15,7 @@ package pkcs11
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
|
||||
"gopkg.in/yaml.v3"
|
||||
@ -55,7 +56,7 @@ func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
|
||||
func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
|
||||
p11keyfile := Pkcs11KeyFile{}
|
||||
|
||||
err := yaml.Unmarshal([]byte(yamlstr), &p11keyfile)
|
||||
err := yaml.Unmarshal(yamlstr, &p11keyfile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile")
|
||||
}
|
||||
@ -126,7 +127,7 @@ func GetDefaultModuleDirectoriesYaml(indent string) string {
|
||||
func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
|
||||
p11conf := Pkcs11Config{}
|
||||
|
||||
err := yaml.Unmarshal([]byte(yamlstr), &p11conf)
|
||||
err := yaml.Unmarshal(yamlstr, &p11conf)
|
||||
if err != nil {
|
||||
return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config")
|
||||
}
|
||||
|
14
vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
generated
vendored
14
vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
generated
vendored
@ -1,3 +1,4 @@
|
||||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
/*
|
||||
@ -138,7 +139,7 @@ func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, err
|
||||
|
||||
// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
|
||||
func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
|
||||
session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
|
||||
session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid)
|
||||
}
|
||||
@ -152,7 +153,7 @@ func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkc
|
||||
return session, nil
|
||||
}
|
||||
|
||||
// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (share libary) and to get
|
||||
// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
|
||||
// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
|
||||
// one slot after the other will be attempted and the first one where login succeeds will be used
|
||||
func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session pkcs11.SessionHandle, err error) {
|
||||
@ -177,7 +178,8 @@ func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
|
||||
if slotid >= 0 {
|
||||
session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin)
|
||||
return p11ctx, session, err
|
||||
} else {
|
||||
}
|
||||
|
||||
slots, err := p11ctx.GetSlotList(true)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "GetSlotList failed")
|
||||
@ -203,7 +205,6 @@ func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
|
||||
return nil, 0, errors.New("Could not create session to any slot and/or log in")
|
||||
}
|
||||
return nil, 0, errors.New("Could not create session to any slot")
|
||||
}
|
||||
}
|
||||
|
||||
func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) {
|
||||
@ -437,7 +438,6 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
|
||||
// }
|
||||
// Note: More recent versions of this code explicitly write 'sha1'
|
||||
// while older versions left it empty in case of 'sha1'.
|
||||
//
|
||||
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
|
||||
pkcs11blob := Pkcs11Blob{}
|
||||
err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
|
||||
@ -448,7 +448,7 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte,
|
||||
case 0:
|
||||
// latest supported version
|
||||
default:
|
||||
return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version)
|
||||
return nil, errors.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version)
|
||||
}
|
||||
// since we do trial and error, collect all encountered errors
|
||||
errs := ""
|
||||
@ -458,7 +458,7 @@ func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte,
|
||||
case 0:
|
||||
// last supported version
|
||||
default:
|
||||
return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version)
|
||||
return nil, errors.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version)
|
||||
}
|
||||
|
||||
ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
|
||||
|
6
vendor/github.com/containers/ocicrypt/encryption.go
generated
vendored
6
vendor/github.com/containers/ocicrypt/encryption.go
generated
vendored
@ -20,15 +20,15 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
|
||||
"github.com/containers/ocicrypt/keywrap/keyprovider"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/ocicrypt/blockcipher"
|
||||
"github.com/containers/ocicrypt/config"
|
||||
keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
|
||||
"github.com/containers/ocicrypt/keywrap"
|
||||
"github.com/containers/ocicrypt/keywrap/jwe"
|
||||
"github.com/containers/ocicrypt/keywrap/keyprovider"
|
||||
"github.com/containers/ocicrypt/keywrap/pgp"
|
||||
"github.com/containers/ocicrypt/keywrap/pkcs11"
|
||||
"github.com/containers/ocicrypt/keywrap/pkcs7"
|
||||
@ -243,7 +243,7 @@ func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor)
|
||||
}
|
||||
}
|
||||
if !privKeyGiven {
|
||||
return nil, errors.New("missing private key needed for decryption")
|
||||
return nil, errors.Errorf("missing private key needed for decryption:\n%s", errs)
|
||||
}
|
||||
return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs)
|
||||
}
|
||||
|
17
vendor/github.com/containers/ocicrypt/gpg.go
generated
vendored
17
vendor/github.com/containers/ocicrypt/gpg.go
generated
vendored
@ -18,12 +18,13 @@ package ocicrypt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@ -272,8 +273,8 @@ func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stdoutstr, err2 := ioutil.ReadAll(stdout)
|
||||
stderrstr, _ := ioutil.ReadAll(stderr)
|
||||
stdoutstr, err2 := io.ReadAll(stdout)
|
||||
stderrstr, _ := io.ReadAll(stderr)
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
|
||||
@ -310,9 +311,15 @@ func resolveRecipients(gc GPGClient, recipients []string) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
|
||||
var (
|
||||
onceRegexp sync.Once
|
||||
emailPattern *regexp.Regexp
|
||||
)
|
||||
|
||||
func extractEmailFromDetails(details []byte) string {
|
||||
onceRegexp.Do(func() {
|
||||
emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
|
||||
})
|
||||
loc := emailPattern.FindSubmatchIndex(details)
|
||||
if len(loc) == 0 {
|
||||
return ""
|
||||
@ -352,7 +359,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault
|
||||
}
|
||||
keywrapper := GetKeyWrapper(scheme)
|
||||
if keywrapper == nil {
|
||||
return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme)
|
||||
return nil, nil, errors.Errorf("could not get KeyWrapper for %s", scheme)
|
||||
}
|
||||
keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets)
|
||||
if err != nil {
|
||||
|
4
vendor/github.com/containers/ocicrypt/gpgvault.go
generated
vendored
4
vendor/github.com/containers/ocicrypt/gpgvault.go
generated
vendored
@ -18,7 +18,7 @@ package ocicrypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
@ -76,7 +76,7 @@ func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte)
|
||||
// AddSecretKeyRingFiles adds the secret key rings given their filenames
|
||||
func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
|
||||
for _, filename := range filenames {
|
||||
gpgSecretKeyRingData, err := ioutil.ReadFile(filename)
|
||||
gpgSecretKeyRingData, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
16
vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
generated
vendored
16
vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
generated
vendored
@ -2,7 +2,6 @@ package helpers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -43,7 +42,7 @@ func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, []
|
||||
gpgRecipients = append(gpgRecipients, []byte(value))
|
||||
|
||||
case "jwe":
|
||||
tmp, err := ioutil.ReadFile(value)
|
||||
tmp, err := os.ReadFile(value)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
|
||||
}
|
||||
@ -53,7 +52,7 @@ func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, []
|
||||
pubkeys = append(pubkeys, tmp)
|
||||
|
||||
case "pkcs7":
|
||||
tmp, err := ioutil.ReadFile(value)
|
||||
tmp, err := os.ReadFile(value)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
|
||||
}
|
||||
@ -63,7 +62,7 @@ func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, []
|
||||
x509s = append(x509s, tmp)
|
||||
|
||||
case "pkcs11":
|
||||
tmp, err := ioutil.ReadFile(value)
|
||||
tmp, err := os.ReadFile(value)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
|
||||
}
|
||||
@ -93,7 +92,7 @@ func processx509Certs(keys []string) ([][]byte, error) {
|
||||
if _, err := os.Stat(fileName); os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
tmp, err := ioutil.ReadFile(fileName)
|
||||
tmp, err := os.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Unable to read file")
|
||||
}
|
||||
@ -113,7 +112,7 @@ func processx509Certs(keys []string) ([][]byte, error) {
|
||||
// - <password>
|
||||
func processPwdString(pwdString string) ([]byte, error) {
|
||||
if strings.HasPrefix(pwdString, "file=") {
|
||||
return ioutil.ReadFile(pwdString[5:])
|
||||
return os.ReadFile(pwdString[5:])
|
||||
} else if strings.HasPrefix(pwdString, "pass=") {
|
||||
return []byte(pwdString[5:]), nil
|
||||
} else if strings.HasPrefix(pwdString, "fd=") {
|
||||
@ -174,7 +173,7 @@ func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]b
|
||||
}
|
||||
|
||||
keyfile := parts[0]
|
||||
tmp, err := ioutil.ReadFile(keyfile)
|
||||
tmp, err := os.ReadFile(keyfile)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, err
|
||||
}
|
||||
@ -374,7 +373,6 @@ func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoCon
|
||||
|
||||
if len(ccs) > 0 {
|
||||
return encconfig.CombineCryptoConfigs(ccs), nil
|
||||
} else {
|
||||
return encconfig.CryptoConfig{}, nil
|
||||
}
|
||||
return encconfig.CryptoConfig{}, nil
|
||||
}
|
||||
|
1
vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
generated
vendored
1
vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
generated
vendored
@ -19,6 +19,7 @@ package keyprovider
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/containers/ocicrypt/config"
|
||||
keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
|
||||
"github.com/containers/ocicrypt/keywrap"
|
||||
|
3
vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
generated
vendored
3
vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
generated
vendored
@ -23,7 +23,6 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/mail"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -126,7 +125,7 @@ func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) (
|
||||
continue
|
||||
}
|
||||
// we get the plain key options back
|
||||
optsData, err := ioutil.ReadAll(md.UnverifiedBody)
|
||||
optsData, err := io.ReadAll(md.UnverifiedBody)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
7
vendor/github.com/containers/ocicrypt/utils/ioutils.go
generated
vendored
7
vendor/github.com/containers/ocicrypt/utils/ioutils.go
generated
vendored
@ -18,9 +18,10 @@ package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"os/exec"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns
|
||||
@ -44,13 +45,15 @@ type Runner struct{}
|
||||
// Exec executes a Linux command-line command and returns its output, along with an error if the command fails.
|
||||
func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
stdInputBuffer := bytes.NewBuffer(input)
|
||||
cmd := exec.Command(cmdName, args...)
|
||||
cmd.Stdin = stdInputBuffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error while running command: %s", cmdName)
|
||||
return nil, errors.Wrapf(err, "Error while running command: %s. stderr: %s", cmdName, stderr.String())
|
||||
}
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
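As a hedged aside (not part of the vendored change), the exported Runner.Exec helper above can be exercised roughly like this; the command and input are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	r := utils.Runner{}
	// Run `cat` with the given bytes on stdin; with this change, a failure
	// would also surface the command's stderr in the returned error.
	out, err := r.Exec("cat", nil, []byte("hello\n"))
	if err != nil {
		fmt.Println("command failed:", err)
		return
	}
	fmt.Printf("%s", out)
}
```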
141
vendor/github.com/rivo/uniseg/README.md
generated
vendored
141
vendor/github.com/rivo/uniseg/README.md
generated
vendored
@ -1,15 +1,15 @@
|
||||
# Unicode Text Segmentation for Go
|
||||
|
||||
[](https://godoc.org/github.com/rivo/uniseg)
|
||||
[](https://pkg.go.dev/github.com/rivo/uniseg)
|
||||
[](https://goreportcard.com/report/github.com/rivo/uniseg)
|
||||
|
||||
This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0).
|
||||
|
||||
At this point, only the determination of grapheme cluster boundaries is implemented.
|
||||
This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 14.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html).
|
||||
|
||||
## Background
|
||||
|
||||
In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples:
|
||||
### Grapheme Clusters
|
||||
|
||||
In Go, [strings are read-only slices of bytes](https://go.dev/blog/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples:
|
||||
|
||||
|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters|
|
||||
|-|-|-|-|
|
||||
@ -17,7 +17,23 @@ In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings).
|
||||
|🏳️🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`|
|
||||
|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`|
|
||||
|
||||
This package provides a tool to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit.
|
||||
This package provides tools to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit.
|
||||
|
||||
### Word Boundaries
|
||||
|
||||
Word boundaries are used in a number of different contexts. The most familiar ones are selection (double-click mouse selection), cursor movement ("move to next word" control-arrow keys), and the dialog option "Whole Word Search" for search and replace. They are also used in database queries, to determine whether elements are within a certain number of words of one another. Searching may also use word boundaries in determining matching items. This package provides tools to determine word boundaries within strings.
|
||||
|
||||
### Sentence Boundaries
|
||||
|
||||
Sentence boundaries are often used for triple-click or some other method of selecting or iterating through blocks of text that are larger than single words. They are also used to determine whether words occur within the same sentence in database queries. This package provides tools to determine sentence boundaries within strings.
|
||||
|
||||
### Line Breaking
|
||||
|
||||
Line breaking, also known as word wrapping, is the process of breaking a section of text into lines such that it will fit in the available width of a page, window or other display area. This package provides tools to determine where a string may or may not be broken and where it must be broken (for example after newline characters).
|
||||
|
||||
### Monospace Width
|
||||
|
||||
Most terminals or text displays / text editors using a monospace font (for example source code editors) use a fixed width for each character. Some characters such as emojis or characters found in Asian and other languages may take up more than one character cell. This package provides tools to determine the number of cells a string will take up when displayed in a monospace font. See [here](https://pkg.go.dev/github.com/rivo/uniseg#hdr-Monospace_Width) for more information.
|
||||
|
||||
## Installation
|
||||
|
||||
@ -25,38 +41,117 @@ This package provides a tool to iterate over these grapheme clusters. This may b
|
||||
go get github.com/rivo/uniseg
|
||||
```
|
||||
|
||||
## Basic Example
|
||||
## Examples
|
||||
|
||||
### Counting Characters in a String
|
||||
|
||||
```go
|
||||
package uniseg
|
||||
n := uniseg.GraphemeClusterCount("🇩🇪🏳️🌈")
|
||||
fmt.Println(n)
|
||||
// 2
|
||||
```
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
### Calculating the Monospace String Width
|
||||
|
||||
"github.com/rivo/uniseg"
|
||||
)
|
||||
```go
|
||||
width := uniseg.StringWidth("🇩🇪🏳️🌈!")
|
||||
fmt.Println(width)
|
||||
// 5
|
||||
```
|
||||
|
||||
func main() {
|
||||
gr := uniseg.NewGraphemes("👍🏼!")
|
||||
for gr.Next() {
|
||||
### Using the [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) Class
|
||||
|
||||
This is the most convenient method of iterating over grapheme clusters:
|
||||
|
||||
```go
|
||||
gr := uniseg.NewGraphemes("👍🏼!")
|
||||
for gr.Next() {
|
||||
fmt.Printf("%x ", gr.Runes())
|
||||
}
|
||||
// Output: [1f44d 1f3fc] [21]
|
||||
}
|
||||
// [1f44d 1f3fc] [21]
|
||||
```
|
||||
|
||||
### Using the [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) Function
|
||||
|
||||
This is orders of magnitude faster than the `Graphemes` class, but it requires the handling of states and boundaries:
|
||||
|
||||
```go
|
||||
str := "🇩🇪🏳️🌈"
|
||||
state := -1
|
||||
var c string
|
||||
for len(str) > 0 {
|
||||
c, str, _, state = uniseg.StepString(str, state)
|
||||
fmt.Printf("%x ", []rune(c))
|
||||
}
|
||||
// [1f1e9 1f1ea] [1f3f3 fe0f 200d 1f308]
|
||||
```
|
||||
|
||||
### Advanced Examples
|
||||
|
||||
Breaking into grapheme clusters and evaluating line breaks:
|
||||
|
||||
```go
|
||||
str := "First line.\nSecond line."
|
||||
state := -1
|
||||
var (
|
||||
c string
|
||||
boundaries int
|
||||
)
|
||||
for len(str) > 0 {
|
||||
c, str, boundaries, state = uniseg.StepString(str, state)
|
||||
fmt.Print(c)
|
||||
if boundaries&uniseg.MaskLine == uniseg.LineCanBreak {
|
||||
fmt.Print("|")
|
||||
} else if boundaries&uniseg.MaskLine == uniseg.LineMustBreak {
|
||||
fmt.Print("‖")
|
||||
}
|
||||
}
|
||||
// First |line.
|
||||
// ‖Second |line.‖
|
||||
```
|
||||
|
||||
If you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString):
|
||||
|
||||
```go
|
||||
str := "Hello, world!"
|
||||
state := -1
|
||||
var c string
|
||||
for len(str) > 0 {
|
||||
c, str, state = uniseg.FirstWordInString(str, state)
|
||||
fmt.Printf("(%s)\n", c)
|
||||
}
|
||||
// (Hello)
|
||||
// (,)
|
||||
// ( )
|
||||
// (world)
|
||||
// (!)
|
||||
```
|
||||
|
||||
Similarly, use
|
||||
|
||||
- [`FirstGraphemeCluster`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeCluster) or [`FirstGraphemeClusterInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeClusterInString) for grapheme cluster determination only,
|
||||
- [`FirstSentence`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentence) or [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) for sentence segmentation only, and
|
||||
- [`FirstLineSegment`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegment) or [`FirstLineSegmentInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegmentInString) for line breaking / word wrapping (although using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) is preferred as it will observe grapheme cluster boundaries).
|
||||
|
||||
Finally, if you need to reverse a string while preserving grapheme clusters, use [`ReverseString`](https://pkg.go.dev/github.com/rivo/uniseg#ReverseString):
|
||||
|
||||
```go
|
||||
fmt.Println(uniseg.ReverseString("🇩🇪🏳️🌈"))
|
||||
// 🏳️🌈🇩🇪
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation.
|
||||
Refer to https://pkg.go.dev/github.com/rivo/uniseg for the package's documentation.
|
||||
|
||||
## Dependencies
|
||||
|
||||
This package does not depend on any packages outside the standard library.
|
||||
|
||||
## Sponsor this Project
|
||||
|
||||
[Become a Sponsor on GitHub](https://github.com/sponsors/rivo?metadata_source=uniseg_readme) to support this project!
|
||||
|
||||
## Your Feedback
|
||||
|
||||
Add your issue here on GitHub. Feel free to get in touch if you have any questions.
|
||||
|
||||
## Version
|
||||
|
||||
Version tags will be introduced once Golang modules are official. Consider this version 0.1.
|
||||
Add your issue here on GitHub, preferably before submitting any PR's. Feel free to get in touch if you have any questions.
|
108
vendor/github.com/rivo/uniseg/doc.go
generated
vendored
108
vendor/github.com/rivo/uniseg/doc.go
generated
vendored
@ -1,8 +1,108 @@
|
||||
/*
|
||||
Package uniseg implements Unicode Text Segmentation according to Unicode
|
||||
Standard Annex #29 (http://unicode.org/reports/tr29/).
|
||||
Package uniseg implements Unicode Text Segmentation, Unicode Line Breaking, and
|
||||
string width calculation for monospace fonts. Unicode Text Segmentation conforms
|
||||
to Unicode Standard Annex #29 (https://unicode.org/reports/tr29/) and Unicode
|
||||
Line Breaking conforms to Unicode Standard Annex #14
|
||||
(https://unicode.org/reports/tr14/).
|
||||
|
||||
At this point, only the determination of grapheme cluster boundaries is
|
||||
implemented.
|
||||
In short, using this package, you can split a string into grapheme clusters
|
||||
(what people would usually refer to as a "character"), into words, and into
|
||||
sentences. Or, in its simplest case, this package allows you to count the number
|
||||
of characters in a string, especially when it contains complex characters such
|
||||
as emojis, combining characters, or characters from Asian, Arabic, Hebrew, or
|
||||
other languages. Additionally, you can use it to implement line breaking (or
|
||||
"word wrapping"), that is, to determine where text can be broken over to the
|
||||
next line when the width of the line is not big enough to fit the entire text.
|
||||
Finally, you can use it to calculate the display width of a string for monospace
|
||||
fonts.
|
||||
|
||||
# Getting Started
|
||||
|
||||
If you just want to count the number of characters in a string, you can use
|
||||
[GraphemeClusterCount]. If you want to determine the display width of a string,
|
||||
you can use [StringWidth]. If you want to iterate over a string, you can use
|
||||
[Step], [StepString], or the [Graphemes] class (more convenient but less
|
||||
performant). This will provide you with all information: grapheme clusters,
|
||||
word boundaries, sentence boundaries, line breaks, and monospace character
|
||||
widths. The specialized functions [FirstGraphemeCluster],
|
||||
[FirstGraphemeClusterInString], [FirstWord], [FirstWordInString],
|
||||
[FirstSentence], and [FirstSentenceInString] can be used if only one type of
|
||||
information is needed.
|
||||
|
||||
# Grapheme Clusters
|
||||
|
||||
Consider the rainbow flag emoji: 🏳️🌈. On most modern systems, it appears as one
|
||||
character. But its string representation actually has 14 bytes, so counting
|
||||
bytes (or using len("🏳️🌈")) will not work as expected. Counting runes won't,
|
||||
either: The flag has 4 Unicode code points, thus 4 runes. The stdlib function
|
||||
utf8.RuneCountInString("🏳️🌈") and len([]rune("🏳️🌈")) will both return 4.
|
||||
|
||||
The [GraphemeClusterCount] function will return 1 for the rainbow flag emoji.
|
||||
The Graphemes class and a variety of functions in this package will allow you to
|
||||
split strings into its grapheme clusters.
|
||||
|
||||
# Word Boundaries
|
||||
|
||||
Word boundaries are used in a number of different contexts. The most familiar
|
||||
ones are selection (double-click mouse selection), cursor movement ("move to
|
||||
next word" control-arrow keys), and the dialog option "Whole Word Search" for
|
||||
search and replace. This package provides methods for determining word
|
||||
boundaries.
|
||||
|
||||
# Sentence Boundaries
|
||||
|
||||
Sentence boundaries are often used for triple-click or some other method of
|
||||
selecting or iterating through blocks of text that are larger than single words.
|
||||
They are also used to determine whether words occur within the same sentence in
|
||||
database queries. This package provides methods for determining sentence
|
||||
boundaries.
|
||||
|
||||
# Line Breaking
|
||||
|
||||
Line breaking, also known as word wrapping, is the process of breaking a section
|
||||
of text into lines such that it will fit in the available width of a page,
|
||||
window or other display area. This package provides methods to determine the
|
||||
positions in a string where a line must be broken, may be broken, or must not be
|
||||
broken.
|
||||
|
||||
# Monospace Width
|
||||
|
||||
Monospace width, as referred to in this package, is the width of a string in a
|
||||
monospace font. This is commonly used in terminal user interfaces or text
|
||||
displays or editors that don't support proportional fonts. A width of 1
|
||||
corresponds to a single character cell. The C function [wcswidth()] and its
|
||||
implementation in other programming languages is in widespread use for the same
|
||||
purpose. However, there is no standard for the calculation of such widths, and
|
||||
this package differs from wcswidth() in a number of ways, presumably to generate
|
||||
more visually pleasing results.
|
||||
|
||||
To start, we assume that every code point has a width of 1, with the following
|
||||
exceptions:
|
||||
|
||||
- Code points with grapheme cluster break properties Control, CR, LF, Extend,
|
||||
and ZWJ have a width of 0.
|
||||
- U+2E3A, Two-Em Dash, has a width of 3.
|
||||
- U+2E3B, Three-Em Dash, has a width of 4.
|
||||
- Characters with the East-Asian Width properties "Fullwidth" (F) and "Wide"
|
||||
(W) have a width of 2. (Properties "Ambiguous" (A) and "Neutral" (N) both
|
||||
have a width of 1.)
|
||||
- Code points with grapheme cluster break property Regional Indicator have a
|
||||
width of 2.
|
||||
- Code points with grapheme cluster break property Extended Pictographic have
|
||||
a width of 2, unless their Emoji Presentation flag is "No", in which case
|
||||
the width is 1.
|
||||
|
||||
For Hangul grapheme clusters composed of conjoining Jamo and for Regional
|
||||
Indicators (flags), all code points except the first one have a width of 0. For
|
||||
grapheme clusters starting with an Extended Pictographic, any additional code
|
||||
point will force a total width of 2, except if the Variation Selector-15
|
||||
(U+FE0E) is included, in which case the total width is always 1. Grapheme
|
||||
clusters ending with Variation Selector-16 (U+FE0F) have a width of 2.
|
||||
|
||||
Note that whether these widths appear correct depends on your application's
|
||||
render engine, to which extent it conforms to the Unicode Standard, and its
|
||||
choice of font.
|
||||
|
||||
[wcswidth()]: https://man7.org/linux/man-pages/man3/wcswidth.3.html
|
||||
*/
|
||||
package uniseg
|
||||
|
2556
vendor/github.com/rivo/uniseg/eastasianwidth.go
generated
vendored
Normal file
2556
vendor/github.com/rivo/uniseg/eastasianwidth.go
generated
vendored
Normal file
File diff suppressed because it is too large
285
vendor/github.com/rivo/uniseg/emojipresentation.go
generated
vendored
Normal file
285
vendor/github.com/rivo/uniseg/emojipresentation.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
||||
package uniseg
|
||||
|
||||
// Code generated via go generate from gen_properties.go. DO NOT EDIT.
|
||||
|
||||
// emojiPresentation are taken from
|
||||
//
|
||||
// and
|
||||
// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt
|
||||
// ("Extended_Pictographic" only)
|
||||
// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode
|
||||
// license agreement.
|
||||
var emojiPresentation = [][3]int{
|
||||
{0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done
|
||||
{0x23E9, 0x23EC, prEmojiPresentation}, // E0.6 [4] (⏩..⏬) fast-forward button..fast down button
|
||||
{0x23F0, 0x23F0, prEmojiPresentation}, // E0.6 [1] (⏰) alarm clock
|
||||
{0x23F3, 0x23F3, prEmojiPresentation}, // E0.6 [1] (⏳) hourglass not done
|
||||
{0x25FD, 0x25FE, prEmojiPresentation}, // E0.6 [2] (◽..◾) white medium-small square..black medium-small square
|
||||
{0x2614, 0x2615, prEmojiPresentation}, // E0.6 [2] (☔..☕) umbrella with rain drops..hot beverage
|
||||
{0x2648, 0x2653, prEmojiPresentation}, // E0.6 [12] (♈..♓) Aries..Pisces
|
||||
{0x267F, 0x267F, prEmojiPresentation}, // E0.6 [1] (♿) wheelchair symbol
|
||||
{0x2693, 0x2693, prEmojiPresentation}, // E0.6 [1] (⚓) anchor
|
||||
{0x26A1, 0x26A1, prEmojiPresentation}, // E0.6 [1] (⚡) high voltage
|
||||
{0x26AA, 0x26AB, prEmojiPresentation}, // E0.6 [2] (⚪..⚫) white circle..black circle
|
||||
{0x26BD, 0x26BE, prEmojiPresentation}, // E0.6 [2] (⚽..⚾) soccer ball..baseball
|
||||
{0x26C4, 0x26C5, prEmojiPresentation}, // E0.6 [2] (⛄..⛅) snowman without snow..sun behind cloud
|
||||
{0x26CE, 0x26CE, prEmojiPresentation}, // E0.6 [1] (⛎) Ophiuchus
|
||||
{0x26D4, 0x26D4, prEmojiPresentation}, // E0.6 [1] (⛔) no entry
|
||||
{0x26EA, 0x26EA, prEmojiPresentation}, // E0.6 [1] (⛪) church
|
||||
{0x26F2, 0x26F3, prEmojiPresentation}, // E0.6 [2] (⛲..⛳) fountain..flag in hole
|
||||
{0x26F5, 0x26F5, prEmojiPresentation}, // E0.6 [1] (⛵) sailboat
|
||||
{0x26FA, 0x26FA, prEmojiPresentation}, // E0.6 [1] (⛺) tent
|
||||
{0x26FD, 0x26FD, prEmojiPresentation}, // E0.6 [1] (⛽) fuel pump
|
||||
{0x2705, 0x2705, prEmojiPresentation}, // E0.6 [1] (✅) check mark button
|
||||
{0x270A, 0x270B, prEmojiPresentation}, // E0.6 [2] (✊..✋) raised fist..raised hand
|
||||
{0x2728, 0x2728, prEmojiPresentation}, // E0.6 [1] (✨) sparkles
|
||||
{0x274C, 0x274C, prEmojiPresentation}, // E0.6 [1] (❌) cross mark
|
||||
{0x274E, 0x274E, prEmojiPresentation}, // E0.6 [1] (❎) cross mark button
|
||||
{0x2753, 0x2755, prEmojiPresentation}, // E0.6 [3] (❓..❕) red question mark..white exclamation mark
|
||||
{0x2757, 0x2757, prEmojiPresentation}, // E0.6 [1] (❗) red exclamation mark
|
||||
{0x2795, 0x2797, prEmojiPresentation}, // E0.6 [3] (➕..➗) plus..divide
|
||||
{0x27B0, 0x27B0, prEmojiPresentation}, // E0.6 [1] (➰) curly loop
|
||||
{0x27BF, 0x27BF, prEmojiPresentation}, // E1.0 [1] (➿) double curly loop
|
||||
{0x2B1B, 0x2B1C, prEmojiPresentation}, // E0.6 [2] (⬛..⬜) black large square..white large square
|
||||
{0x2B50, 0x2B50, prEmojiPresentation}, // E0.6 [1] (⭐) star
|
||||
{0x2B55, 0x2B55, prEmojiPresentation}, // E0.6 [1] (⭕) hollow red circle
|
||||
{0x1F004, 0x1F004, prEmojiPresentation}, // E0.6 [1] (🀄) mahjong red dragon
|
||||
{0x1F0CF, 0x1F0CF, prEmojiPresentation}, // E0.6 [1] (🃏) joker
|
||||
{0x1F18E, 0x1F18E, prEmojiPresentation}, // E0.6 [1] (🆎) AB button (blood type)
|
||||
{0x1F191, 0x1F19A, prEmojiPresentation}, // E0.6 [10] (🆑..🆚) CL button..VS button
|
||||
{0x1F1E6, 0x1F1FF, prEmojiPresentation}, // E0.0 [26] (🇦..🇿) regional indicator symbol letter a..regional indicator symbol letter z
|
||||
{0x1F201, 0x1F201, prEmojiPresentation}, // E0.6 [1] (🈁) Japanese “here” button
|
||||
{0x1F21A, 0x1F21A, prEmojiPresentation}, // E0.6 [1] (🈚) Japanese “free of charge” button
|
||||
{0x1F22F, 0x1F22F, prEmojiPresentation}, // E0.6 [1] (🈯) Japanese “reserved” button
|
||||
{0x1F232, 0x1F236, prEmojiPresentation}, // E0.6 [5] (🈲..🈶) Japanese “prohibited” button..Japanese “not free of charge” button
|
||||
{0x1F238, 0x1F23A, prEmojiPresentation}, // E0.6 [3] (🈸..🈺) Japanese “application” button..Japanese “open for business” button
|
||||
{0x1F250, 0x1F251, prEmojiPresentation}, // E0.6 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button
|
||||
{0x1F300, 0x1F30C, prEmojiPresentation}, // E0.6 [13] (🌀..🌌) cyclone..milky way
|
||||
{0x1F30D, 0x1F30E, prEmojiPresentation}, // E0.7 [2] (🌍..🌎) globe showing Europe-Africa..globe showing Americas
|
||||
{0x1F30F, 0x1F30F, prEmojiPresentation}, // E0.6 [1] (🌏) globe showing Asia-Australia
|
||||
{0x1F310, 0x1F310, prEmojiPresentation}, // E1.0 [1] (🌐) globe with meridians
|
||||
{0x1F311, 0x1F311, prEmojiPresentation}, // E0.6 [1] (🌑) new moon
|
||||
{0x1F312, 0x1F312, prEmojiPresentation}, // E1.0 [1] (🌒) waxing crescent moon
|
||||
{0x1F313, 0x1F315, prEmojiPresentation}, // E0.6 [3] (🌓..🌕) first quarter moon..full moon
|
||||
{0x1F316, 0x1F318, prEmojiPresentation}, // E1.0 [3] (🌖..🌘) waning gibbous moon..waning crescent moon
|
||||
{0x1F319, 0x1F319, prEmojiPresentation}, // E0.6 [1] (🌙) crescent moon
|
||||
{0x1F31A, 0x1F31A, prEmojiPresentation}, // E1.0 [1] (🌚) new moon face
|
||||
{0x1F31B, 0x1F31B, prEmojiPresentation}, // E0.6 [1] (🌛) first quarter moon face
|
||||
{0x1F31C, 0x1F31C, prEmojiPresentation}, // E0.7 [1] (🌜) last quarter moon face
|
||||
{0x1F31D, 0x1F31E, prEmojiPresentation}, // E1.0 [2] (🌝..🌞) full moon face..sun with face
|
||||
{0x1F31F, 0x1F320, prEmojiPresentation}, // E0.6 [2] (🌟..🌠) glowing star..shooting star
|
||||
{0x1F32D, 0x1F32F, prEmojiPresentation}, // E1.0 [3] (🌭..🌯) hot dog..burrito
|
||||
{0x1F330, 0x1F331, prEmojiPresentation}, // E0.6 [2] (🌰..🌱) chestnut..seedling
|
||||
{0x1F332, 0x1F333, prEmojiPresentation}, // E1.0 [2] (🌲..🌳) evergreen tree..deciduous tree
|
||||
{0x1F334, 0x1F335, prEmojiPresentation}, // E0.6 [2] (🌴..🌵) palm tree..cactus
|
||||
{0x1F337, 0x1F34A, prEmojiPresentation}, // E0.6 [20] (🌷..🍊) tulip..tangerine
|
||||
{0x1F34B, 0x1F34B, prEmojiPresentation}, // E1.0 [1] (🍋) lemon
|
||||
{0x1F34C, 0x1F34F, prEmojiPresentation}, // E0.6 [4] (🍌..🍏) banana..green apple
|
||||
{0x1F350, 0x1F350, prEmojiPresentation}, // E1.0 [1] (🍐) pear
|
||||
{0x1F351, 0x1F37B, prEmojiPresentation}, // E0.6 [43] (🍑..🍻) peach..clinking beer mugs
|
||||
{0x1F37C, 0x1F37C, prEmojiPresentation}, // E1.0 [1] (🍼) baby bottle
|
||||
{0x1F37E, 0x1F37F, prEmojiPresentation}, // E1.0 [2] (🍾..🍿) bottle with popping cork..popcorn
|
||||
{0x1F380, 0x1F393, prEmojiPresentation}, // E0.6 [20] (🎀..🎓) ribbon..graduation cap
|
||||
{0x1F3A0, 0x1F3C4, prEmojiPresentation}, // E0.6 [37] (🎠..🏄) carousel horse..person surfing
|
||||
{0x1F3C5, 0x1F3C5, prEmojiPresentation}, // E1.0 [1] (🏅) sports medal
|
||||
{0x1F3C6, 0x1F3C6, prEmojiPresentation}, // E0.6 [1] (🏆) trophy
|
||||
{0x1F3C7, 0x1F3C7, prEmojiPresentation}, // E1.0 [1] (🏇) horse racing
|
||||
{0x1F3C8, 0x1F3C8, prEmojiPresentation}, // E0.6 [1] (🏈) american football
|
||||
{0x1F3C9, 0x1F3C9, prEmojiPresentation}, // E1.0 [1] (🏉) rugby football
|
||||
{0x1F3CA, 0x1F3CA, prEmojiPresentation}, // E0.6 [1] (🏊) person swimming
|
||||
{0x1F3CF, 0x1F3D3, prEmojiPresentation}, // E1.0 [5] (🏏..🏓) cricket game..ping pong
|
||||
{0x1F3E0, 0x1F3E3, prEmojiPresentation}, // E0.6 [4] (🏠..🏣) house..Japanese post office
|
||||
{0x1F3E4, 0x1F3E4, prEmojiPresentation}, // E1.0 [1] (🏤) post office
|
||||
{0x1F3E5, 0x1F3F0, prEmojiPresentation}, // E0.6 [12] (🏥..🏰) hospital..castle
|
||||
{0x1F3F4, 0x1F3F4, prEmojiPresentation}, // E1.0 [1] (🏴) black flag
|
||||
{0x1F3F8, 0x1F407, prEmojiPresentation}, // E1.0 [16] (🏸..🐇) badminton..rabbit
|
||||
{0x1F408, 0x1F408, prEmojiPresentation}, // E0.7 [1] (🐈) cat
|
||||
{0x1F409, 0x1F40B, prEmojiPresentation}, // E1.0 [3] (🐉..🐋) dragon..whale
|
||||
{0x1F40C, 0x1F40E, prEmojiPresentation}, // E0.6 [3] (🐌..🐎) snail..horse
|
||||
{0x1F40F, 0x1F410, prEmojiPresentation}, // E1.0 [2] (🐏..🐐) ram..goat
|
||||
{0x1F411, 0x1F412, prEmojiPresentation}, // E0.6 [2] (🐑..🐒) ewe..monkey
|
||||
{0x1F413, 0x1F413, prEmojiPresentation}, // E1.0 [1] (🐓) rooster
|
||||
{0x1F414, 0x1F414, prEmojiPresentation}, // E0.6 [1] (🐔) chicken
|
||||
{0x1F415, 0x1F415, prEmojiPresentation}, // E0.7 [1] (🐕) dog
|
||||
{0x1F416, 0x1F416, prEmojiPresentation}, // E1.0 [1] (🐖) pig
|
||||
{0x1F417, 0x1F429, prEmojiPresentation}, // E0.6 [19] (🐗..🐩) boar..poodle
|
||||
{0x1F42A, 0x1F42A, prEmojiPresentation}, // E1.0 [1] (🐪) camel
|
||||
{0x1F42B, 0x1F43E, prEmojiPresentation}, // E0.6 [20] (🐫..🐾) two-hump camel..paw prints
|
||||
{0x1F440, 0x1F440, prEmojiPresentation}, // E0.6 [1] (👀) eyes
|
||||
{0x1F442, 0x1F464, prEmojiPresentation}, // E0.6 [35] (👂..👤) ear..bust in silhouette
|
||||
{0x1F465, 0x1F465, prEmojiPresentation}, // E1.0 [1] (👥) busts in silhouette
|
||||
{0x1F466, 0x1F46B, prEmojiPresentation}, // E0.6 [6] (👦..👫) boy..woman and man holding hands
|
||||
{0x1F46C, 0x1F46D, prEmojiPresentation}, // E1.0 [2] (👬..👭) men holding hands..women holding hands
|
||||
{0x1F46E, 0x1F4AC, prEmojiPresentation}, // E0.6 [63] (👮..💬) police officer..speech balloon
|
||||
{0x1F4AD, 0x1F4AD, prEmojiPresentation}, // E1.0 [1] (💭) thought balloon
|
||||
{0x1F4AE, 0x1F4B5, prEmojiPresentation}, // E0.6 [8] (💮..💵) white flower..dollar banknote
|
||||
{0x1F4B6, 0x1F4B7, prEmojiPresentation}, // E1.0 [2] (💶..💷) euro banknote..pound banknote
|
||||
{0x1F4B8, 0x1F4EB, prEmojiPresentation}, // E0.6 [52] (💸..📫) money with wings..closed mailbox with raised flag
|
||||
{0x1F4EC, 0x1F4ED, prEmojiPresentation}, // E0.7 [2] (📬..📭) open mailbox with raised flag..open mailbox with lowered flag
|
||||
{0x1F4EE, 0x1F4EE, prEmojiPresentation}, // E0.6 [1] (📮) postbox
|
||||
{0x1F4EF, 0x1F4EF, prEmojiPresentation}, // E1.0 [1] (📯) postal horn
|
||||
{0x1F4F0, 0x1F4F4, prEmojiPresentation}, // E0.6 [5] (📰..📴) newspaper..mobile phone off
|
||||
{0x1F4F5, 0x1F4F5, prEmojiPresentation}, // E1.0 [1] (📵) no mobile phones
|
||||
{0x1F4F6, 0x1F4F7, prEmojiPresentation}, // E0.6 [2] (📶..📷) antenna bars..camera
|
||||
{0x1F4F8, 0x1F4F8, prEmojiPresentation}, // E1.0 [1] (📸) camera with flash
|
||||
{0x1F4F9, 0x1F4FC, prEmojiPresentation}, // E0.6 [4] (📹..📼) video camera..videocassette
|
||||
{0x1F4FF, 0x1F502, prEmojiPresentation}, // E1.0 [4] (📿..🔂) prayer beads..repeat single button
|
||||
{0x1F503, 0x1F503, prEmojiPresentation}, // E0.6 [1] (🔃) clockwise vertical arrows
|
||||
{0x1F504, 0x1F507, prEmojiPresentation}, // E1.0 [4] (🔄..🔇) counterclockwise arrows button..muted speaker
|
||||
{0x1F508, 0x1F508, prEmojiPresentation}, // E0.7 [1] (🔈) speaker low volume
|
||||
{0x1F509, 0x1F509, prEmojiPresentation}, // E1.0 [1] (🔉) speaker medium volume
|
||||
{0x1F50A, 0x1F514, prEmojiPresentation}, // E0.6 [11] (🔊..🔔) speaker high volume..bell
|
||||
{0x1F515, 0x1F515, prEmojiPresentation}, // E1.0 [1] (🔕) bell with slash
|
||||
{0x1F516, 0x1F52B, prEmojiPresentation}, // E0.6 [22] (🔖..🔫) bookmark..water pistol
|
||||
{0x1F52C, 0x1F52D, prEmojiPresentation}, // E1.0 [2] (🔬..🔭) microscope..telescope
|
||||
{0x1F52E, 0x1F53D, prEmojiPresentation}, // E0.6 [16] (🔮..🔽) crystal ball..downwards button
|
||||
{0x1F54B, 0x1F54E, prEmojiPresentation}, // E1.0 [4] (🕋..🕎) kaaba..menorah
|
||||
{0x1F550, 0x1F55B, prEmojiPresentation}, // E0.6 [12] (🕐..🕛) one o’clock..twelve o’clock
|
||||
{0x1F55C, 0x1F567, prEmojiPresentation}, // E0.7 [12] (🕜..🕧) one-thirty..twelve-thirty
|
||||
{0x1F57A, 0x1F57A, prEmojiPresentation}, // E3.0 [1] (🕺) man dancing
|
||||
{0x1F595, 0x1F596, prEmojiPresentation}, // E1.0 [2] (🖕..🖖) middle finger..vulcan salute
|
||||
{0x1F5A4, 0x1F5A4, prEmojiPresentation}, // E3.0 [1] (🖤) black heart
|
||||
{0x1F5FB, 0x1F5FF, prEmojiPresentation}, // E0.6 [5] (🗻..🗿) mount fuji..moai
|
||||
{0x1F600, 0x1F600, prEmojiPresentation}, // E1.0 [1] (😀) grinning face
|
||||
{0x1F601, 0x1F606, prEmojiPresentation}, // E0.6 [6] (😁..😆) beaming face with smiling eyes..grinning squinting face
|
||||
{0x1F607, 0x1F608, prEmojiPresentation}, // E1.0 [2] (😇..😈) smiling face with halo..smiling face with horns
|
||||
{0x1F609, 0x1F60D, prEmojiPresentation}, // E0.6 [5] (😉..😍) winking face..smiling face with heart-eyes
|
||||
{0x1F60E, 0x1F60E, prEmojiPresentation}, // E1.0 [1] (😎) smiling face with sunglasses
|
||||
{0x1F60F, 0x1F60F, prEmojiPresentation}, // E0.6 [1] (😏) smirking face
|
||||
{0x1F610, 0x1F610, prEmojiPresentation}, // E0.7 [1] (😐) neutral face
|
||||
{0x1F611, 0x1F611, prEmojiPresentation}, // E1.0 [1] (😑) expressionless face
|
||||
{0x1F612, 0x1F614, prEmojiPresentation}, // E0.6 [3] (😒..😔) unamused face..pensive face
|
||||
{0x1F615, 0x1F615, prEmojiPresentation}, // E1.0 [1] (😕) confused face
|
||||
{0x1F616, 0x1F616, prEmojiPresentation}, // E0.6 [1] (😖) confounded face
|
||||
{0x1F617, 0x1F617, prEmojiPresentation}, // E1.0 [1] (😗) kissing face
|
||||
{0x1F618, 0x1F618, prEmojiPresentation}, // E0.6 [1] (😘) face blowing a kiss
|
||||
{0x1F619, 0x1F619, prEmojiPresentation}, // E1.0 [1] (😙) kissing face with smiling eyes
|
||||
{0x1F61A, 0x1F61A, prEmojiPresentation}, // E0.6 [1] (😚) kissing face with closed eyes
|
||||
{0x1F61B, 0x1F61B, prEmojiPresentation}, // E1.0 [1] (😛) face with tongue
|
||||
{0x1F61C, 0x1F61E, prEmojiPresentation}, // E0.6 [3] (😜..😞) winking face with tongue..disappointed face
|
||||
{0x1F61F, 0x1F61F, prEmojiPresentation}, // E1.0 [1] (😟) worried face
|
||||
{0x1F620, 0x1F625, prEmojiPresentation}, // E0.6 [6] (😠..😥) angry face..sad but relieved face
|
||||
{0x1F626, 0x1F627, prEmojiPresentation}, // E1.0 [2] (😦..😧) frowning face with open mouth..anguished face
|
||||
{0x1F628, 0x1F62B, prEmojiPresentation}, // E0.6 [4] (😨..😫) fearful face..tired face
|
||||
{0x1F62C, 0x1F62C, prEmojiPresentation}, // E1.0 [1] (😬) grimacing face
|
||||
{0x1F62D, 0x1F62D, prEmojiPresentation}, // E0.6 [1] (😭) loudly crying face
|
||||
{0x1F62E, 0x1F62F, prEmojiPresentation}, // E1.0 [2] (😮..😯) face with open mouth..hushed face
|
||||
{0x1F630, 0x1F633, prEmojiPresentation}, // E0.6 [4] (😰..😳) anxious face with sweat..flushed face
|
||||
{0x1F634, 0x1F634, prEmojiPresentation}, // E1.0 [1] (😴) sleeping face
|
||||
{0x1F635, 0x1F635, prEmojiPresentation}, // E0.6 [1] (😵) face with crossed-out eyes
|
||||
{0x1F636, 0x1F636, prEmojiPresentation}, // E1.0 [1] (😶) face without mouth
|
||||
{0x1F637, 0x1F640, prEmojiPresentation}, // E0.6 [10] (😷..🙀) face with medical mask..weary cat
|
||||
{0x1F641, 0x1F644, prEmojiPresentation}, // E1.0 [4] (🙁..🙄) slightly frowning face..face with rolling eyes
|
||||
{0x1F645, 0x1F64F, prEmojiPresentation}, // E0.6 [11] (🙅..🙏) person gesturing NO..folded hands
|
||||
{0x1F680, 0x1F680, prEmojiPresentation}, // E0.6 [1] (🚀) rocket
|
||||
{0x1F681, 0x1F682, prEmojiPresentation}, // E1.0 [2] (🚁..🚂) helicopter..locomotive
|
||||
{0x1F683, 0x1F685, prEmojiPresentation}, // E0.6 [3] (🚃..🚅) railway car..bullet train
|
||||
{0x1F686, 0x1F686, prEmojiPresentation}, // E1.0 [1] (🚆) train
|
||||
{0x1F687, 0x1F687, prEmojiPresentation}, // E0.6 [1] (🚇) metro
|
||||
{0x1F688, 0x1F688, prEmojiPresentation}, // E1.0 [1] (🚈) light rail
|
||||
{0x1F689, 0x1F689, prEmojiPresentation}, // E0.6 [1] (🚉) station
|
||||
{0x1F68A, 0x1F68B, prEmojiPresentation}, // E1.0 [2] (🚊..🚋) tram..tram car
|
||||
{0x1F68C, 0x1F68C, prEmojiPresentation}, // E0.6 [1] (🚌) bus
|
||||
{0x1F68D, 0x1F68D, prEmojiPresentation}, // E0.7 [1] (🚍) oncoming bus
|
||||
{0x1F68E, 0x1F68E, prEmojiPresentation}, // E1.0 [1] (🚎) trolleybus
|
||||
{0x1F68F, 0x1F68F, prEmojiPresentation}, // E0.6 [1] (🚏) bus stop
|
||||
{0x1F690, 0x1F690, prEmojiPresentation}, // E1.0 [1] (🚐) minibus
|
||||
{0x1F691, 0x1F693, prEmojiPresentation}, // E0.6 [3] (🚑..🚓) ambulance..police car
|
||||
{0x1F694, 0x1F694, prEmojiPresentation}, // E0.7 [1] (🚔) oncoming police car
|
||||
{0x1F695, 0x1F695, prEmojiPresentation}, // E0.6 [1] (🚕) taxi
|
||||
{0x1F696, 0x1F696, prEmojiPresentation}, // E1.0 [1] (🚖) oncoming taxi
|
||||
{0x1F697, 0x1F697, prEmojiPresentation}, // E0.6 [1] (🚗) automobile
|
||||
{0x1F698, 0x1F698, prEmojiPresentation}, // E0.7 [1] (🚘) oncoming automobile
|
||||
{0x1F699, 0x1F69A, prEmojiPresentation}, // E0.6 [2] (🚙..🚚) sport utility vehicle..delivery truck
|
||||
{0x1F69B, 0x1F6A1, prEmojiPresentation}, // E1.0 [7] (🚛..🚡) articulated lorry..aerial tramway
|
||||
{0x1F6A2, 0x1F6A2, prEmojiPresentation}, // E0.6 [1] (🚢) ship
|
||||
{0x1F6A3, 0x1F6A3, prEmojiPresentation}, // E1.0 [1] (🚣) person rowing boat
|
||||
{0x1F6A4, 0x1F6A5, prEmojiPresentation}, // E0.6 [2] (🚤..🚥) speedboat..horizontal traffic light
|
||||
{0x1F6A6, 0x1F6A6, prEmojiPresentation}, // E1.0 [1] (🚦) vertical traffic light
|
||||
{0x1F6A7, 0x1F6AD, prEmojiPresentation}, // E0.6 [7] (🚧..🚭) construction..no smoking
|
||||
{0x1F6AE, 0x1F6B1, prEmojiPresentation}, // E1.0 [4] (🚮..🚱) litter in bin sign..non-potable water
|
||||
{0x1F6B2, 0x1F6B2, prEmojiPresentation}, // E0.6 [1] (🚲) bicycle
|
||||
{0x1F6B3, 0x1F6B5, prEmojiPresentation}, // E1.0 [3] (🚳..🚵) no bicycles..person mountain biking
|
||||
{0x1F6B6, 0x1F6B6, prEmojiPresentation}, // E0.6 [1] (🚶) person walking
|
||||
{0x1F6B7, 0x1F6B8, prEmojiPresentation}, // E1.0 [2] (🚷..🚸) no pedestrians..children crossing
|
||||
{0x1F6B9, 0x1F6BE, prEmojiPresentation}, // E0.6 [6] (🚹..🚾) men’s room..water closet
|
||||
{0x1F6BF, 0x1F6BF, prEmojiPresentation}, // E1.0 [1] (🚿) shower
|
||||
{0x1F6C0, 0x1F6C0, prEmojiPresentation}, // E0.6 [1] (🛀) person taking bath
|
||||
{0x1F6C1, 0x1F6C5, prEmojiPresentation}, // E1.0 [5] (🛁..🛅) bathtub..left luggage
|
||||
{0x1F6CC, 0x1F6CC, prEmojiPresentation}, // E1.0 [1] (🛌) person in bed
|
||||
{0x1F6D0, 0x1F6D0, prEmojiPresentation}, // E1.0 [1] (🛐) place of worship
|
||||
{0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart
|
||||
{0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple
|
||||
{0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator
|
||||
{0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy
|
||||
{0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival
|
||||
{0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe
|
||||
{0x1F6F7, 0x1F6F8, prEmojiPresentation}, // E5.0 [2] (🛷..🛸) sled..flying saucer
|
||||
{0x1F6F9, 0x1F6F9, prEmojiPresentation}, // E11.0 [1] (🛹) skateboard
|
||||
{0x1F6FA, 0x1F6FA, prEmojiPresentation}, // E12.0 [1] (🛺) auto rickshaw
|
||||
{0x1F6FB, 0x1F6FC, prEmojiPresentation}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate
|
||||
{0x1F7E0, 0x1F7EB, prEmojiPresentation}, // E12.0 [12] (🟠..🟫) orange circle..brown square
|
||||
{0x1F7F0, 0x1F7F0, prEmojiPresentation}, // E14.0 [1] (🟰) heavy equals sign
|
||||
{0x1F90C, 0x1F90C, prEmojiPresentation}, // E13.0 [1] (🤌) pinched fingers
|
||||
{0x1F90D, 0x1F90F, prEmojiPresentation}, // E12.0 [3] (🤍..🤏) white heart..pinching hand
|
||||
{0x1F910, 0x1F918, prEmojiPresentation}, // E1.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns
|
||||
{0x1F919, 0x1F91E, prEmojiPresentation}, // E3.0 [6] (🤙..🤞) call me hand..crossed fingers
|
||||
{0x1F91F, 0x1F91F, prEmojiPresentation}, // E5.0 [1] (🤟) love-you gesture
|
||||
{0x1F920, 0x1F927, prEmojiPresentation}, // E3.0 [8] (🤠..🤧) cowboy hat face..sneezing face
|
||||
{0x1F928, 0x1F92F, prEmojiPresentation}, // E5.0 [8] (🤨..🤯) face with raised eyebrow..exploding head
|
||||
{0x1F930, 0x1F930, prEmojiPresentation}, // E3.0 [1] (🤰) pregnant woman
|
||||
{0x1F931, 0x1F932, prEmojiPresentation}, // E5.0 [2] (🤱..🤲) breast-feeding..palms up together
|
||||
{0x1F933, 0x1F93A, prEmojiPresentation}, // E3.0 [8] (🤳..🤺) selfie..person fencing
|
||||
{0x1F93C, 0x1F93E, prEmojiPresentation}, // E3.0 [3] (🤼..🤾) people wrestling..person playing handball
|
||||
{0x1F93F, 0x1F93F, prEmojiPresentation}, // E12.0 [1] (🤿) diving mask
|
||||
{0x1F940, 0x1F945, prEmojiPresentation}, // E3.0 [6] (🥀..🥅) wilted flower..goal net
|
||||
{0x1F947, 0x1F94B, prEmojiPresentation}, // E3.0 [5] (🥇..🥋) 1st place medal..martial arts uniform
|
||||
{0x1F94C, 0x1F94C, prEmojiPresentation}, // E5.0 [1] (🥌) curling stone
|
||||
{0x1F94D, 0x1F94F, prEmojiPresentation}, // E11.0 [3] (🥍..🥏) lacrosse..flying disc
|
||||
{0x1F950, 0x1F95E, prEmojiPresentation}, // E3.0 [15] (🥐..🥞) croissant..pancakes
|
||||
{0x1F95F, 0x1F96B, prEmojiPresentation}, // E5.0 [13] (🥟..🥫) dumpling..canned food
|
||||
{0x1F96C, 0x1F970, prEmojiPresentation}, // E11.0 [5] (🥬..🥰) leafy green..smiling face with hearts
|
||||
{0x1F971, 0x1F971, prEmojiPresentation}, // E12.0 [1] (🥱) yawning face
|
||||
{0x1F972, 0x1F972, prEmojiPresentation}, // E13.0 [1] (🥲) smiling face with tear
|
||||
{0x1F973, 0x1F976, prEmojiPresentation}, // E11.0 [4] (🥳..🥶) partying face..cold face
|
||||
{0x1F977, 0x1F978, prEmojiPresentation}, // E13.0 [2] (🥷..🥸) ninja..disguised face
|
||||
{0x1F979, 0x1F979, prEmojiPresentation}, // E14.0 [1] (🥹) face holding back tears
|
||||
{0x1F97A, 0x1F97A, prEmojiPresentation}, // E11.0 [1] (🥺) pleading face
|
||||
{0x1F97B, 0x1F97B, prEmojiPresentation}, // E12.0 [1] (🥻) sari
|
||||
{0x1F97C, 0x1F97F, prEmojiPresentation}, // E11.0 [4] (🥼..🥿) lab coat..flat shoe
|
||||
{0x1F980, 0x1F984, prEmojiPresentation}, // E1.0 [5] (🦀..🦄) crab..unicorn
|
||||
{0x1F985, 0x1F991, prEmojiPresentation}, // E3.0 [13] (🦅..🦑) eagle..squid
|
||||
{0x1F992, 0x1F997, prEmojiPresentation}, // E5.0 [6] (🦒..🦗) giraffe..cricket
|
||||
{0x1F998, 0x1F9A2, prEmojiPresentation}, // E11.0 [11] (🦘..🦢) kangaroo..swan
|
||||
{0x1F9A3, 0x1F9A4, prEmojiPresentation}, // E13.0 [2] (🦣..🦤) mammoth..dodo
|
||||
{0x1F9A5, 0x1F9AA, prEmojiPresentation}, // E12.0 [6] (🦥..🦪) sloth..oyster
|
||||
{0x1F9AB, 0x1F9AD, prEmojiPresentation}, // E13.0 [3] (🦫..🦭) beaver..seal
|
||||
{0x1F9AE, 0x1F9AF, prEmojiPresentation}, // E12.0 [2] (🦮..🦯) guide dog..white cane
|
||||
{0x1F9B0, 0x1F9B9, prEmojiPresentation}, // E11.0 [10] (🦰..🦹) red hair..supervillain
|
||||
{0x1F9BA, 0x1F9BF, prEmojiPresentation}, // E12.0 [6] (🦺..🦿) safety vest..mechanical leg
|
||||
{0x1F9C0, 0x1F9C0, prEmojiPresentation}, // E1.0 [1] (🧀) cheese wedge
|
||||
{0x1F9C1, 0x1F9C2, prEmojiPresentation}, // E11.0 [2] (🧁..🧂) cupcake..salt
|
||||
{0x1F9C3, 0x1F9CA, prEmojiPresentation}, // E12.0 [8] (🧃..🧊) beverage box..ice
|
||||
{0x1F9CB, 0x1F9CB, prEmojiPresentation}, // E13.0 [1] (🧋) bubble tea
|
||||
{0x1F9CC, 0x1F9CC, prEmojiPresentation}, // E14.0 [1] (🧌) troll
|
||||
{0x1F9CD, 0x1F9CF, prEmojiPresentation}, // E12.0 [3] (🧍..🧏) person standing..deaf person
|
||||
{0x1F9D0, 0x1F9E6, prEmojiPresentation}, // E5.0 [23] (🧐..🧦) face with monocle..socks
|
||||
{0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet
|
||||
{0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts
|
||||
{0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal
|
||||
{0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope
|
||||
{0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch
|
||||
{0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute
|
||||
{0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls
|
||||
{0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo
|
||||
{0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock
|
||||
{0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa
|
||||
{0x1FAB0, 0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather
|
||||
{0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs
|
||||
{0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging
|
||||
{0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown
|
||||
{0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot
|
||||
{0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring liquid..jar
|
||||
{0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles
|
||||
{0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands
|
||||
}
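// Illustrative sketch, not part of the vendored uniseg source: a minimal
// lookup over a sorted range table like the one above. It assumes the table
// is the package's emojiPresentation slice of {first, last, property} entries
// (as produced by gen_properties.go); the function name is hypothetical.
func isEmojiPresentation(r rune) bool {
	lo, hi := 0, len(emojiPresentation)-1
	for lo <= hi {
		mid := (lo + hi) / 2
		if int(r) < emojiPresentation[mid][0] {
			hi = mid - 1
		} else if int(r) > emojiPresentation[mid][1] {
			lo = mid + 1
		} else {
			return true // r falls within the range [first, last].
		}
	}
	return false
}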
213 vendor/github.com/rivo/uniseg/gen_breaktest.go generated vendored Normal file
@@ -0,0 +1,213 @@
|
||||
//go:build generate
|
||||
|
||||
// This program generates a Go file containing a slice of test cases based on the
|
||||
// Unicode Character Database auxiliary data files. The command line arguments
|
||||
// are as follows:
|
||||
//
|
||||
// 1. The name of the Unicode data file (just the filename, without extension).
|
||||
// 2. The name of the locally generated Go file.
|
||||
// 3. The name of the slice containing the test cases.
|
||||
// 4. The name of the generator, for logging purposes.
|
||||
//
|
||||
//go:generate go run gen_breaktest.go GraphemeBreakTest graphemebreak_test.go graphemeBreakTestCases graphemes
|
||||
//go:generate go run gen_breaktest.go WordBreakTest wordbreak_test.go wordBreakTestCases words
|
||||
//go:generate go run gen_breaktest.go SentenceBreakTest sentencebreak_test.go sentenceBreakTestCases sentences
|
||||
//go:generate go run gen_breaktest.go LineBreakTest linebreak_test.go lineBreakTestCases lines
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// We want to test against a specific version rather than the latest. When the
|
||||
// package is upgraded to a new version, change these to generate new tests.
|
||||
const (
|
||||
testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt`
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 5 {
|
||||
fmt.Println("Not enough arguments, see code for details")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
log.SetPrefix("gen_breaktest (" + os.Args[4] + "): ")
|
||||
log.SetFlags(0)
|
||||
|
||||
// Read text of testcases and parse into Go source code.
|
||||
src, err := parse(fmt.Sprintf(testCaseURL, os.Args[1]))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Format the Go code.
|
||||
formatted, err := format.Source(src)
|
||||
if err != nil {
|
||||
log.Fatalln("gofmt:", err)
|
||||
}
|
||||
|
||||
// Write it out.
|
||||
log.Print("Writing to ", os.Args[2])
|
||||
if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// parse reads a break text file, either from a local file or from a URL. It
|
||||
// parses the file data into Go source code representing the test cases.
|
||||
func parse(url string) ([]byte, error) {
|
||||
log.Printf("Parsing %s", url)
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := res.Body
|
||||
defer body.Close()
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
buf.Grow(120 << 10)
|
||||
buf.WriteString(`package uniseg
|
||||
|
||||
// Code generated via go generate from gen_breaktest.go. DO NOT EDIT.
|
||||
|
||||
// ` + os.Args[3] + ` are Grapheme testcases taken from
|
||||
// ` + url + `
|
||||
// on ` + time.Now().Format("January 2, 2006") + `. See
|
||||
// https://www.unicode.org/license.html for the Unicode license agreement.
|
||||
var ` + os.Args[3] + ` = []testCase {
|
||||
`)
|
||||
|
||||
sc := bufio.NewScanner(body)
|
||||
num := 1
|
||||
var line []byte
|
||||
original := make([]byte, 0, 64)
|
||||
expected := make([]byte, 0, 64)
|
||||
for sc.Scan() {
|
||||
num++
|
||||
line = sc.Bytes()
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
var comment []byte
|
||||
if i := bytes.IndexByte(line, '#'); i >= 0 {
|
||||
comment = bytes.TrimSpace(line[i+1:])
|
||||
line = bytes.TrimSpace(line[:i])
|
||||
}
|
||||
original, expected, err := parseRuneSequence(line, original[:0], expected[:0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`line %d: %v: %q`, num, err, line)
|
||||
}
|
||||
fmt.Fprintf(buf, "\t{original: \"%s\", expected: %s}, // %s\n", original, expected, comment)
|
||||
}
|
||||
if err := sc.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check for final "# EOF", useful check if we're streaming via HTTP
|
||||
if !bytes.Equal(line, []byte("# EOF")) {
|
||||
return nil, fmt.Errorf(`line %d: expected "# EOF" as final line, got %q`, num, line)
|
||||
}
|
||||
buf.WriteString("}\n")
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Used by parseRuneSequence to match input via bytes.HasPrefix.
|
||||
var (
|
||||
prefixBreak = []byte("÷ ")
|
||||
prefixDontBreak = []byte("× ")
|
||||
breakOk = []byte("÷")
|
||||
breakNo = []byte("×")
|
||||
)
|
||||
|
||||
// parseRuneSequence parses a rune + breaking opportunity sequence from b
|
||||
// and appends the Go code for testcase.original to orig
|
||||
// and appends the Go code for testcase.expected to exp.
|
||||
// It returns the new orig and exp slices.
|
||||
//
|
||||
// E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷"
|
||||
// it will append
|
||||
// "\u0020\u0308\U0001F1E6"
|
||||
// and "[][]rune{{0x0020,0x0308},{0x1F1E6},}"
|
||||
// to orig and exp respectively.
|
||||
//
|
||||
// The formatting of exp is expected to be cleaned up by gofmt or format.Source.
|
||||
// Note we explicitly require the sequence to start with ÷ and we implicitly
|
||||
// require it to end with ÷.
|
||||
func parseRuneSequence(b, orig, exp []byte) ([]byte, []byte, error) {
|
||||
// Check for and remove first ÷ or ×.
|
||||
if !bytes.HasPrefix(b, prefixBreak) && !bytes.HasPrefix(b, prefixDontBreak) {
|
||||
return nil, nil, errors.New("expected ÷ or × as first character")
|
||||
}
|
||||
if bytes.HasPrefix(b, prefixBreak) {
|
||||
b = b[len(prefixBreak):]
|
||||
} else {
|
||||
b = b[len(prefixDontBreak):]
|
||||
}
|
||||
|
||||
boundary := true
|
||||
exp = append(exp, "[][]rune{"...)
|
||||
for len(b) > 0 {
|
||||
if boundary {
|
||||
exp = append(exp, '{')
|
||||
}
|
||||
exp = append(exp, "0x"...)
|
||||
// Find end of hex digits.
|
||||
var i int
|
||||
for i = 0; i < len(b) && b[i] != ' '; i++ {
|
||||
if d := b[i]; ('0' <= d && d <= '9') ||
('A' <= d && d <= 'F') ||
('a' <= d && d <= 'f') {
|
||||
continue
|
||||
}
|
||||
return nil, nil, errors.New("bad hex digit")
|
||||
}
|
||||
switch i {
|
||||
case 4:
|
||||
orig = append(orig, "\\u"...)
|
||||
case 5:
|
||||
orig = append(orig, "\\U000"...)
|
||||
default:
|
||||
return nil, nil, errors.New("unsupported code point hex length")
|
||||
}
|
||||
orig = append(orig, b[:i]...)
|
||||
exp = append(exp, b[:i]...)
|
||||
b = b[i:]
|
||||
|
||||
// Check for space between hex and ÷ or ×.
|
||||
if len(b) < 1 || b[0] != ' ' {
|
||||
return nil, nil, errors.New("bad input")
|
||||
}
|
||||
b = b[1:]
|
||||
|
||||
// Check for next boundary.
|
||||
switch {
|
||||
case bytes.HasPrefix(b, breakOk):
|
||||
boundary = true
|
||||
b = b[len(breakOk):]
|
||||
case bytes.HasPrefix(b, breakNo):
|
||||
boundary = false
|
||||
b = b[len(breakNo):]
|
||||
default:
|
||||
return nil, nil, errors.New("missing ÷ or ×")
|
||||
}
|
||||
if boundary {
|
||||
exp = append(exp, '}')
|
||||
}
|
||||
exp = append(exp, ',')
|
||||
if len(b) > 0 && b[0] == ' ' {
|
||||
b = b[1:]
|
||||
}
|
||||
}
|
||||
exp = append(exp, '}')
|
||||
return orig, exp, nil
|
||||
}
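// Illustrative sketch, not part of the vendored uniseg source: it feeds the
// sample input documented above to parseRuneSequence; the function name is
// hypothetical.
func exampleParseRuneSequence() {
	orig, exp, err := parseRuneSequence([]byte("÷ 0020 × 0308 ÷ 1F1E6 ÷"), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Per the documentation above, orig now holds `\u0020\u0308\U0001F1E6`
	// and exp holds `[][]rune{{0x0020,0x0308},{0x1F1E6},}`.
	fmt.Printf("%s\n%s\n", orig, exp)
}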
256 vendor/github.com/rivo/uniseg/gen_properties.go generated vendored Normal file
@@ -0,0 +1,256 @@
|
||||
//go:build generate
|
||||
|
||||
// This program generates a property file in Go from the Unicode Character
|
||||
// Database auxiliary data files. The command line arguments are as follows:
|
||||
//
|
||||
// 1. The name of the Unicode data file (just the filename, without extension).
|
||||
// Can be "-" (to skip) if the emoji flag is included.
|
||||
// 2. The name of the locally generated Go file.
|
||||
// 3. The name of the slice mapping code points to properties.
|
||||
// 4. The name of the generator, for logging purposes.
|
||||
// 5. (Optional) Flags, comma-separated. The following flags are available:
|
||||
// - "emojis=<property>": include the specified emoji properties (e.g.
|
||||
// "Extended_Pictographic").
|
||||
// - "gencat": include general category properties.
|
||||
//
|
||||
//go:generate go run gen_properties.go auxiliary/GraphemeBreakProperty graphemeproperties.go graphemeCodePoints graphemes emojis=Extended_Pictographic
|
||||
//go:generate go run gen_properties.go auxiliary/WordBreakProperty wordproperties.go workBreakCodePoints words emojis=Extended_Pictographic
|
||||
//go:generate go run gen_properties.go auxiliary/SentenceBreakProperty sentenceproperties.go sentenceBreakCodePoints sentences
|
||||
//go:generate go run gen_properties.go LineBreak lineproperties.go lineBreakCodePoints lines gencat
|
||||
//go:generate go run gen_properties.go EastAsianWidth eastasianwidth.go eastAsianWidth eastasianwidth
|
||||
//go:generate go run gen_properties.go - emojipresentation.go emojiPresentation emojipresentation emojis=Emoji_Presentation
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// We want to test against a specific version rather than the latest. When the
|
||||
// package is upgraded to a new version, change these to generate new tests.
|
||||
const (
|
||||
propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt`
|
||||
emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt`
|
||||
)
|
||||
|
||||
// The regular expression for a line containing a code point range property.
|
||||
var propertyPattern = regexp.MustCompile(`^([0-9A-F]{4,6})(\.\.([0-9A-F]{4,6}))?\s*;\s*([A-Za-z0-9_]+)\s*#\s(.+)$`)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 5 {
|
||||
fmt.Println("Not enough arguments, see code for details")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
log.SetPrefix("gen_properties (" + os.Args[4] + "): ")
|
||||
log.SetFlags(0)
|
||||
|
||||
// Parse flags.
|
||||
flags := make(map[string]string)
|
||||
if len(os.Args) >= 6 {
|
||||
for _, flag := range strings.Split(os.Args[5], ",") {
|
||||
flagFields := strings.Split(flag, "=")
|
||||
if len(flagFields) == 1 {
|
||||
flags[flagFields[0]] = "yes"
|
||||
} else {
|
||||
flags[flagFields[0]] = flagFields[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse the text file and generate Go source code from it.
|
||||
_, includeGeneralCategory := flags["gencat"]
|
||||
var mainURL string
|
||||
if os.Args[1] != "-" {
|
||||
mainURL = fmt.Sprintf(propertyURL, os.Args[1])
|
||||
}
|
||||
src, err := parse(mainURL, flags["emojis"], includeGeneralCategory)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Format the Go code.
|
||||
formatted, err := format.Source([]byte(src))
|
||||
if err != nil {
|
||||
log.Fatal("gofmt:", err)
|
||||
}
|
||||
|
||||
// Save it to the (local) target file.
|
||||
log.Print("Writing to ", os.Args[2])
|
||||
if err := ioutil.WriteFile(os.Args[2], formatted, 0644); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
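// Illustrative sketch, not part of the vendored uniseg source: it repeats the
// flag parsing from main above on a hard-coded example argument; the function
// name and the argument value are arbitrary.
func exampleParseFlags() map[string]string {
	flags := make(map[string]string)
	for _, flag := range strings.Split("emojis=Extended_Pictographic,gencat", ",") {
		flagFields := strings.Split(flag, "=")
		if len(flagFields) == 1 {
			flags[flagFields[0]] = "yes" // Value-less flags are recorded as "yes".
		} else {
			flags[flagFields[0]] = flagFields[1]
		}
	}
	return flags // map[emojis:Extended_Pictographic gencat:yes]
}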
|
||||
|
||||
// parse parses the Unicode Properties text files located at the given URLs and
|
||||
// returns their equivalent Go source code to be used in the uniseg package. If
|
||||
// "emojiProperty" is not an empty string, emoji code points for that emoji
|
||||
// property (e.g. "Extended_Pictographic") will be included. In those cases, you
|
||||
// may pass an empty "propertyURL" to skip parsing the main properties file. If
|
||||
// "includeGeneralCategory" is true, the Unicode General Category property will
|
||||
// be extracted from the comments and included in the output.
|
||||
func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (string, error) {
|
||||
if propertyURL == "" && emojiProperty == "" {
|
||||
return "", errors.New("no properties to parse")
|
||||
}
|
||||
|
||||
// Temporary buffer to hold properties.
|
||||
var properties [][4]string
|
||||
|
||||
// Open the first URL.
|
||||
if propertyURL != "" {
|
||||
log.Printf("Parsing %s", propertyURL)
|
||||
res, err := http.Get(propertyURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
in1 := res.Body
|
||||
defer in1.Close()
|
||||
|
||||
// Parse it.
|
||||
scanner := bufio.NewScanner(in1)
|
||||
num := 0
|
||||
for scanner.Scan() {
|
||||
num++
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
|
||||
// Skip comments and empty lines.
|
||||
if strings.HasPrefix(line, "#") || line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Everything else must be a code point range, a property and a comment.
|
||||
from, to, property, comment, err := parseProperty(line)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s line %d: %v", os.Args[4], num, err)
|
||||
}
|
||||
properties = append(properties, [4]string{from, to, property, comment})
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Open the second URL.
|
||||
if emojiProperty != "" {
|
||||
log.Printf("Parsing %s", emojiURL)
|
||||
res, err := http.Get(emojiURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
in2 := res.Body
|
||||
defer in2.Close()
|
||||
|
||||
// Parse it.
|
||||
scanner := bufio.NewScanner(in2)
|
||||
num := 0
|
||||
for scanner.Scan() {
|
||||
num++
|
||||
line := scanner.Text()
|
||||
|
||||
// Skip comments, empty lines, and everything not containing the
// requested emoji property (e.g. "Extended_Pictographic").
|
||||
if strings.HasPrefix(line, "#") || line == "" || !strings.Contains(line, emojiProperty) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Everything else must be a code point range, a property and a comment.
|
||||
from, to, property, comment, err := parseProperty(line)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("emojis line %d: %v", num, err)
|
||||
}
|
||||
properties = append(properties, [4]string{from, to, property, comment})
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Sort properties.
|
||||
sort.Slice(properties, func(i, j int) bool {
|
||||
left, _ := strconv.ParseUint(properties[i][0], 16, 64)
|
||||
right, _ := strconv.ParseUint(properties[j][0], 16, 64)
|
||||
return left < right
|
||||
})
|
||||
|
||||
// Header.
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
emojiComment string
|
||||
)
|
||||
columns := 3
|
||||
if includeGeneralCategory {
|
||||
columns = 4
|
||||
}
|
||||
if emojiURL != "" {
|
||||
emojiComment = `
|
||||
// and
|
||||
// ` + emojiURL + `
|
||||
// ("Extended_Pictographic" only)`
|
||||
}
|
||||
buf.WriteString(`package uniseg
|
||||
|
||||
// Code generated via go generate from gen_properties.go. DO NOT EDIT.
|
||||
|
||||
// ` + os.Args[3] + ` are taken from
|
||||
// ` + propertyURL + emojiComment + `
|
||||
// on ` + time.Now().Format("January 2, 2006") + `. See https://www.unicode.org/license.html for the Unicode
|
||||
// license agreement.
|
||||
var ` + os.Args[3] + ` = [][` + strconv.Itoa(columns) + `]int{
|
||||
`)
|
||||
|
||||
// Properties.
|
||||
for _, prop := range properties {
|
||||
if includeGeneralCategory {
|
||||
generalCategory := "gc" + prop[3][:2]
|
||||
if generalCategory == "gcL&" {
|
||||
generalCategory = "gcLC"
|
||||
}
|
||||
prop[3] = prop[3][3:]
|
||||
fmt.Fprintf(&buf, "{0x%s,0x%s,%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), generalCategory, prop[3])
|
||||
} else {
|
||||
fmt.Fprintf(&buf, "{0x%s,0x%s,%s}, // %s\n", prop[0], prop[1], translateProperty("pr", prop[2]), prop[3])
|
||||
}
|
||||
}
|
||||
|
||||
// Tail.
|
||||
buf.WriteString("}")
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// parseProperty parses a line of the Unicode properties text file containing a
|
||||
// property for a code point range and returns it along with its comment.
|
||||
func parseProperty(line string) (from, to, property, comment string, err error) {
|
||||
fields := propertyPattern.FindStringSubmatch(line)
|
||||
if fields == nil {
|
||||
err = errors.New("no property found")
|
||||
return
|
||||
}
|
||||
from = fields[1]
|
||||
to = fields[3]
|
||||
if to == "" {
|
||||
to = from
|
||||
}
|
||||
property = fields[4]
|
||||
comment = fields[5]
|
||||
return
|
||||
}
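// Illustrative sketch, not part of the vendored uniseg source: the sample
// line mirrors the format of the Unicode data files; the function name is
// hypothetical.
func exampleParseProperty() {
	line := "1F600..1F64F  ; Emoji_Presentation # E1.0 [80] (😀..🙏) grinning face..folded hands"
	from, to, property, comment, err := parseProperty(line)
	if err != nil {
		log.Fatal(err)
	}
	// from = "1F600", to = "1F64F", property = "Emoji_Presentation",
	// comment = "E1.0 [80] (😀..🙏) grinning face..folded hands".
	fmt.Println(from, to, property, comment)
}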
|
||||
|
||||
// translateProperty translates a property name as used in the Unicode data file
|
||||
// to a variable used in the Go code.
|
||||
func translateProperty(prefix, property string) string {
|
||||
return prefix + strings.ReplaceAll(property, "_", "")
|
||||
}
484 vendor/github.com/rivo/uniseg/grapheme.go generated vendored
@@ -2,267 +2,331 @@ package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// The states of the grapheme cluster parser.
|
||||
const (
|
||||
grAny = iota
|
||||
grCR
|
||||
grControlLF
|
||||
grL
|
||||
grLVV
|
||||
grLVTT
|
||||
grPrepend
|
||||
grExtendedPictographic
|
||||
grExtendedPictographicZWJ
|
||||
grRIOdd
|
||||
grRIEven
|
||||
)
|
||||
|
||||
// The grapheme cluster parser's breaking instructions.
|
||||
const (
|
||||
grNoBoundary = iota
|
||||
grBoundary
|
||||
)
|
||||
|
||||
// The grapheme cluster parser's state transitions. Maps (state, property) to
|
||||
// (new state, breaking instruction, rule number). The breaking instruction
|
||||
// always refers to the boundary between the last and next code point.
|
||||
// Graphemes implements an iterator over Unicode grapheme clusters, or
|
||||
// user-perceived characters. While iterating, it also provides information
|
||||
// about word boundaries, sentence boundaries, line breaks, and monospace
|
||||
// character widths.
|
||||
//
|
||||
// This map is queried as follows:
|
||||
// After constructing the class via [NewGraphemes] for a given string "str",
|
||||
// [Graphemes.Next] is called for every grapheme cluster in a loop until it
|
||||
// returns false. Inside the loop, information about the grapheme cluster as
|
||||
// well as boundary information and character width is available via the various
|
||||
// methods (see examples below).
|
||||
//
|
||||
// 1. Find specific state + specific property. Stop if found.
|
||||
// 2. Find specific state + any property.
|
||||
// 3. Find any state + specific property.
|
||||
// 4. If only (2) or (3) (but not both) was found, stop.
|
||||
// 5. If both (2) and (3) were found, use state and breaking instruction from
|
||||
// the transition with the lower rule number, prefer (3) if rule numbers
|
||||
// are equal. Stop.
|
||||
// 6. Assume grAny and grBoundary.
|
||||
var grTransitions = map[[2]int][3]int{
|
||||
// GB5
|
||||
{grAny, prCR}: {grCR, grBoundary, 50},
|
||||
{grAny, prLF}: {grControlLF, grBoundary, 50},
|
||||
{grAny, prControl}: {grControlLF, grBoundary, 50},
|
||||
|
||||
// GB4
|
||||
{grCR, prAny}: {grAny, grBoundary, 40},
|
||||
{grControlLF, prAny}: {grAny, grBoundary, 40},
|
||||
|
||||
// GB3.
|
||||
{grCR, prLF}: {grAny, grNoBoundary, 30},
|
||||
|
||||
// GB6.
|
||||
{grAny, prL}: {grL, grBoundary, 9990},
|
||||
{grL, prL}: {grL, grNoBoundary, 60},
|
||||
{grL, prV}: {grLVV, grNoBoundary, 60},
|
||||
{grL, prLV}: {grLVV, grNoBoundary, 60},
|
||||
{grL, prLVT}: {grLVTT, grNoBoundary, 60},
|
||||
|
||||
// GB7.
|
||||
{grAny, prLV}: {grLVV, grBoundary, 9990},
|
||||
{grAny, prV}: {grLVV, grBoundary, 9990},
|
||||
{grLVV, prV}: {grLVV, grNoBoundary, 70},
|
||||
{grLVV, prT}: {grLVTT, grNoBoundary, 70},
|
||||
|
||||
// GB8.
|
||||
{grAny, prLVT}: {grLVTT, grBoundary, 9990},
|
||||
{grAny, prT}: {grLVTT, grBoundary, 9990},
|
||||
{grLVTT, prT}: {grLVTT, grNoBoundary, 80},
|
||||
|
||||
// GB9.
|
||||
{grAny, prExtend}: {grAny, grNoBoundary, 90},
|
||||
{grAny, prZWJ}: {grAny, grNoBoundary, 90},
|
||||
|
||||
// GB9a.
|
||||
{grAny, prSpacingMark}: {grAny, grNoBoundary, 91},
|
||||
|
||||
// GB9b.
|
||||
{grAny, prPreprend}: {grPrepend, grBoundary, 9990},
|
||||
{grPrepend, prAny}: {grAny, grNoBoundary, 92},
|
||||
|
||||
// GB11.
|
||||
{grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990},
|
||||
{grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110},
|
||||
{grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110},
|
||||
{grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110},
|
||||
|
||||
// GB12 / GB13.
|
||||
{grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990},
|
||||
{grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120},
|
||||
{grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120},
|
||||
}
|
||||
|
||||
// Graphemes implements an iterator over Unicode extended grapheme clusters,
|
||||
// specified in the Unicode Standard Annex #29. Grapheme clusters correspond to
|
||||
// "user-perceived characters". These characters often consist of multiple
|
||||
// code points (e.g. the "woman kissing woman" emoji consists of 8 code points:
|
||||
// woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ +
|
||||
// woman) and the rules described in Annex #29 must be applied to group those
|
||||
// code points into clusters perceived by the user as one character.
|
||||
// Using this class to iterate over a string is convenient but it is much slower
|
||||
// than using this package's [Step] or [StepString] functions or any of the
|
||||
// other specialized functions starting with "First".
|
||||
type Graphemes struct {
|
||||
// The code points over which this class iterates.
|
||||
codePoints []rune
|
||||
// The original string.
|
||||
original string
|
||||
|
||||
// The (byte-based) indices of the code points into the original string plus
|
||||
// len(original string). Thus, len(indices) = len(codePoints) + 1.
|
||||
indices []int
|
||||
// The remaining string to be parsed.
|
||||
remaining string
|
||||
|
||||
// The current grapheme cluster to be returned. These are indices into
|
||||
// codePoints/indices. If start == end, we either haven't started iterating
|
||||
// yet (0) or the iteration has already completed (1).
|
||||
start, end int
|
||||
// The current grapheme cluster.
|
||||
cluster string
|
||||
|
||||
// The index of the next code point to be parsed.
|
||||
pos int
|
||||
// The byte offset of the current grapheme cluster relative to the original
|
||||
// string.
|
||||
offset int
|
||||
|
||||
// The current state of the code point parser.
|
||||
// The current boundary information of the [Step] parser.
|
||||
boundaries int
|
||||
|
||||
// The current state of the [Step] parser.
|
||||
state int
|
||||
}
|
||||
|
||||
// NewGraphemes returns a new grapheme cluster iterator.
|
||||
func NewGraphemes(s string) *Graphemes {
|
||||
l := utf8.RuneCountInString(s)
|
||||
codePoints := make([]rune, l)
|
||||
indices := make([]int, l+1)
|
||||
i := 0
|
||||
for pos, r := range s {
|
||||
codePoints[i] = r
|
||||
indices[i] = pos
|
||||
i++
|
||||
func NewGraphemes(str string) *Graphemes {
|
||||
return &Graphemes{
|
||||
original: str,
|
||||
remaining: str,
|
||||
state: -1,
|
||||
}
|
||||
indices[l] = len(s)
|
||||
g := &Graphemes{
|
||||
codePoints: codePoints,
|
||||
indices: indices,
|
||||
}
|
||||
g.Next() // Parse ahead.
|
||||
return g
|
||||
}
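// Illustrative usage sketch, not part of the vendored uniseg source: it
// collects every grapheme cluster of an arbitrary example string; the
// function name is hypothetical.
func exampleGraphemes() []string {
	var clusters []string
	g := NewGraphemes("👍🏼!")
	for g.Next() {
		clusters = append(clusters, g.Str())
	}
	return clusters // Two clusters: the thumbs-up with skin tone, then "!".
}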
|
||||
|
||||
// Next advances the iterator by one grapheme cluster and returns false if no
|
||||
// clusters are left. This function must be called before the first cluster is
|
||||
// accessed.
|
||||
func (g *Graphemes) Next() bool {
|
||||
g.start = g.end
|
||||
|
||||
// The state transition gives us a boundary instruction BEFORE the next code
|
||||
// point so we always need to stay ahead by one code point.
|
||||
|
||||
// Parse the next code point.
|
||||
for g.pos <= len(g.codePoints) {
|
||||
// GB2.
|
||||
if g.pos == len(g.codePoints) {
|
||||
g.end = g.pos
|
||||
g.pos++
|
||||
break
|
||||
if len(g.remaining) == 0 {
|
||||
// We're already past the end.
|
||||
g.state = -2
|
||||
g.cluster = ""
|
||||
return false
|
||||
}
|
||||
|
||||
// Determine the property of the next character.
|
||||
nextProperty := property(g.codePoints[g.pos])
|
||||
g.pos++
|
||||
|
||||
// Find the applicable transition.
|
||||
var boundary bool
|
||||
transition, ok := grTransitions[[2]int{g.state, nextProperty}]
|
||||
if ok {
|
||||
// We have a specific transition. We'll use it.
|
||||
g.state = transition[0]
|
||||
boundary = transition[1] == grBoundary
|
||||
} else {
|
||||
// No specific transition found. Try the less specific ones.
|
||||
transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}]
|
||||
transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}]
|
||||
if okAnyProp && okAnyState {
|
||||
// Both apply. We'll use a mix (see comments for grTransitions).
|
||||
g.state = transAnyState[0]
|
||||
boundary = transAnyState[1] == grBoundary
|
||||
if transAnyProp[2] < transAnyState[2] {
|
||||
g.state = transAnyProp[0]
|
||||
boundary = transAnyProp[1] == grBoundary
|
||||
}
|
||||
} else if okAnyProp {
|
||||
// We only have a specific state.
|
||||
g.state = transAnyProp[0]
|
||||
boundary = transAnyProp[1] == grBoundary
|
||||
// This branch will probably never be reached because okAnyState will
|
||||
// always be true given the current transition map. But we keep it here
|
||||
// for future modifications to the transition map where this may not be
|
||||
// true anymore.
|
||||
} else if okAnyState {
|
||||
// We only have a specific property.
|
||||
g.state = transAnyState[0]
|
||||
boundary = transAnyState[1] == grBoundary
|
||||
} else {
|
||||
// No known transition. GB999: Any x Any.
|
||||
g.state = grAny
|
||||
boundary = true
|
||||
}
|
||||
}
|
||||
|
||||
// If we found a cluster boundary, let's stop here. The current cluster will
|
||||
// be the one that just ended.
|
||||
if g.pos-1 == 0 /* GB1 */ || boundary {
|
||||
g.end = g.pos - 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return g.start != g.end
|
||||
g.offset += len(g.cluster)
|
||||
g.cluster, g.remaining, g.boundaries, g.state = StepString(g.remaining, g.state)
|
||||
return true
|
||||
}
|
||||
|
||||
// Runes returns a slice of runes (code points) which corresponds to the current
|
||||
// grapheme cluster. If the iterator is already past the end or Next() has not
|
||||
// yet been called, nil is returned.
|
||||
// grapheme cluster. If the iterator is already past the end or [Graphemes.Next]
|
||||
// has not yet been called, nil is returned.
|
||||
func (g *Graphemes) Runes() []rune {
|
||||
if g.start == g.end {
|
||||
if g.state < 0 {
|
||||
return nil
|
||||
}
|
||||
return g.codePoints[g.start:g.end]
|
||||
return []rune(g.cluster)
|
||||
}
|
||||
|
||||
// Str returns a substring of the original string which corresponds to the
|
||||
// current grapheme cluster. If the iterator is already past the end or Next()
|
||||
// has not yet been called, an empty string is returned.
|
||||
// current grapheme cluster. If the iterator is already past the end or
|
||||
// [Graphemes.Next] has not yet been called, an empty string is returned.
|
||||
func (g *Graphemes) Str() string {
|
||||
if g.start == g.end {
|
||||
return ""
|
||||
}
|
||||
return string(g.codePoints[g.start:g.end])
|
||||
return g.cluster
|
||||
}
|
||||
|
||||
// Bytes returns a byte slice which corresponds to the current grapheme cluster.
|
||||
// If the iterator is already past the end or Next() has not yet been called,
|
||||
// nil is returned.
|
||||
// If the iterator is already past the end or [Graphemes.Next] has not yet been
|
||||
// called, nil is returned.
|
||||
func (g *Graphemes) Bytes() []byte {
|
||||
if g.start == g.end {
|
||||
if g.state < 0 {
|
||||
return nil
|
||||
}
|
||||
return []byte(string(g.codePoints[g.start:g.end]))
|
||||
return []byte(g.cluster)
|
||||
}
|
||||
|
||||
// Positions returns the interval of the current grapheme cluster as byte
|
||||
// positions into the original string. The first returned value "from" indexes
|
||||
// the first byte and the second returned value "to" indexes the first byte that
|
||||
// is not included anymore, i.e. str[from:to] is the current grapheme cluster of
|
||||
// the original string "str". If Next() has not yet been called, both values are
|
||||
// 0. If the iterator is already past the end, both values are 1.
|
||||
// the original string "str". If [Graphemes.Next] has not yet been called, both
|
||||
// values are 0. If the iterator is already past the end, both values are 1.
|
||||
func (g *Graphemes) Positions() (int, int) {
|
||||
return g.indices[g.start], g.indices[g.end]
|
||||
if g.state == -1 {
|
||||
return 0, 0
|
||||
} else if g.state == -2 {
|
||||
return 1, 1
|
||||
}
|
||||
return g.offset, g.offset + len(g.cluster)
|
||||
}
|
||||
|
||||
// IsWordBoundary returns true if a word ends after the current grapheme
|
||||
// cluster.
|
||||
func (g *Graphemes) IsWordBoundary() bool {
|
||||
if g.state < 0 {
|
||||
return true
|
||||
}
|
||||
return g.boundaries&MaskWord != 0
|
||||
}
|
||||
|
||||
// IsSentenceBoundary returns true if a sentence ends after the current
|
||||
// grapheme cluster.
|
||||
func (g *Graphemes) IsSentenceBoundary() bool {
|
||||
if g.state < 0 {
|
||||
return true
|
||||
}
|
||||
return g.boundaries&MaskSentence != 0
|
||||
}
|
||||
|
||||
// LineBreak returns whether the line can be broken after the current grapheme
|
||||
// cluster. A value of [LineDontBreak] means the line may not be broken, a value
|
||||
// of [LineMustBreak] means the line must be broken, and a value of
|
||||
// [LineCanBreak] means the line may or may not be broken.
|
||||
func (g *Graphemes) LineBreak() int {
|
||||
if g.state == -1 {
|
||||
return LineDontBreak
|
||||
}
|
||||
if g.state == -2 {
|
||||
return LineMustBreak
|
||||
}
|
||||
return g.boundaries & MaskLine
|
||||
}
|
||||
|
||||
// Width returns the monospace width of the current grapheme cluster.
|
||||
func (g *Graphemes) Width() int {
|
||||
if g.state < 0 {
|
||||
return 0
|
||||
}
|
||||
return g.boundaries >> ShiftWidth
|
||||
}
|
||||
|
||||
// Reset puts the iterator into its initial state such that the next call to
|
||||
// Next() sets it to the first grapheme cluster again.
|
||||
// [Graphemes.Next] sets it to the first grapheme cluster again.
|
||||
func (g *Graphemes) Reset() {
|
||||
g.start, g.end, g.pos, g.state = 0, 0, 0, grAny
|
||||
g.Next() // Parse ahead again.
|
||||
g.state = -1
|
||||
g.offset = 0
|
||||
g.cluster = ""
|
||||
g.remaining = g.original
|
||||
}
|
||||
|
||||
// GraphemeClusterCount returns the number of user-perceived characters
|
||||
// (grapheme clusters) for the given string. To calculate this number, it
|
||||
// iterates through the string using the Graphemes iterator.
|
||||
// (grapheme clusters) for the given string.
|
||||
func GraphemeClusterCount(s string) (n int) {
|
||||
g := NewGraphemes(s)
|
||||
for g.Next() {
|
||||
state := -1
|
||||
for len(s) > 0 {
|
||||
_, s, _, state = FirstGraphemeClusterInString(s, state)
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReverseString reverses the given string while observing grapheme cluster
|
||||
// boundaries.
|
||||
func ReverseString(s string) string {
|
||||
str := []byte(s)
|
||||
reversed := make([]byte, len(str))
|
||||
state := -1
|
||||
index := len(str)
|
||||
for len(str) > 0 {
|
||||
var cluster []byte
|
||||
cluster, str, _, state = FirstGraphemeCluster(str, state)
|
||||
index -= len(cluster)
|
||||
copy(reversed[index:], cluster)
|
||||
if index <= len(str)/2 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(reversed)
|
||||
}
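// Illustrative sketch, not part of the vendored uniseg source: the function
// name and the flag-emoji input are arbitrary examples.
func exampleCountAndReverse() (int, string) {
	n := GraphemeClusterCount("🇩🇪🏳️‍🌈") // 2: each flag is one cluster.
	r := ReverseString("🇩🇪🏳️‍🌈")        // The two flags swap places, each kept intact.
	return n, r
}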
|
||||
|
||||
// The number of bits the grapheme property must be shifted to make place for
|
||||
// grapheme states.
|
||||
const shiftGraphemePropState = 4
|
||||
|
||||
// FirstGraphemeCluster returns the first grapheme cluster found in the given
|
||||
// byte slice according to the rules of Unicode Standard Annex #29, Grapheme
|
||||
// Cluster Boundaries. This function can be called continuously to extract all
|
||||
// grapheme clusters from a byte slice, as illustrated in the example below.
|
||||
//
|
||||
// If you don't know the current state, for example when calling the function
|
||||
// for the first time, you must pass -1. For consecutive calls, pass the state
|
||||
// and rest slice returned by the previous call.
|
||||
//
|
||||
// The "rest" slice is the sub-slice of the original byte slice "b" starting
|
||||
// after the last byte of the identified grapheme cluster. If the length of the
|
||||
// "rest" slice is 0, the entire byte slice "b" has been processed. The
|
||||
// "cluster" byte slice is the sub-slice of the input slice containing the
|
||||
// identified grapheme cluster.
|
||||
//
|
||||
// The returned width is the width of the grapheme cluster for most monospace
|
||||
// fonts where a value of 1 represents one character cell.
|
||||
//
|
||||
// Given an empty byte slice "b", the function returns nil values.
|
||||
//
|
||||
// While slightly less convenient than using the Graphemes class, this function
|
||||
// has much better performance and makes no allocations. It lends itself well to
|
||||
// large byte slices.
|
||||
func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, newState int) {
|
||||
// An empty byte slice returns nothing.
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRune(b)
|
||||
if len(b) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
var prop int
|
||||
if state < 0 {
|
||||
prop = property(graphemeCodePoints, r)
|
||||
} else {
|
||||
prop = state >> shiftGraphemePropState
|
||||
}
|
||||
return b, nil, runeWidth(r, prop), grAny | (prop << shiftGraphemePropState)
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
var firstProp int
|
||||
if state < 0 {
|
||||
state, firstProp, _ = transitionGraphemeState(state, r)
|
||||
} else {
|
||||
firstProp = state >> shiftGraphemePropState
|
||||
}
|
||||
width += runeWidth(r, firstProp)
|
||||
|
||||
// Transition until we find a boundary.
|
||||
for {
|
||||
var (
|
||||
prop int
|
||||
boundary bool
|
||||
)
|
||||
|
||||
r, l := utf8.DecodeRune(b[length:])
|
||||
state, prop, boundary = transitionGraphemeState(state&maskGraphemeState, r)
|
||||
|
||||
if boundary {
|
||||
return b[:length], b[length:], width, state | (prop << shiftGraphemePropState)
|
||||
}
|
||||
|
||||
if r == vs16 {
|
||||
width = 2
|
||||
} else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
|
||||
width += runeWidth(r, prop)
|
||||
} else if firstProp == prExtendedPictographic {
|
||||
if r == vs15 {
|
||||
width = 1
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(b) <= length {
|
||||
return b, nil, width, grAny | (prop << shiftGraphemePropState)
|
||||
}
|
||||
}
|
||||
}
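// Illustrative usage sketch, not part of the vendored uniseg source: it walks
// a byte slice cluster by cluster without going through the Graphemes class;
// the function name is hypothetical.
func exampleFirstGraphemeCluster(b []byte) (clusters [][]byte) {
	state := -1
	for len(b) > 0 {
		var cluster []byte
		cluster, b, _, state = FirstGraphemeCluster(b, state)
		clusters = append(clusters, cluster)
	}
	return clusters
}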
|
||||
|
||||
// FirstGraphemeClusterInString is like [FirstGraphemeCluster] but its input and
|
||||
// outputs are strings.
|
||||
func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, width, newState int) {
|
||||
// An empty string returns nothing.
|
||||
if len(str) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRuneInString(str)
|
||||
if len(str) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
var prop int
|
||||
if state < 0 {
|
||||
prop = property(graphemeCodePoints, r)
|
||||
} else {
|
||||
prop = state >> shiftGraphemePropState
|
||||
}
|
||||
return str, "", runeWidth(r, prop), grAny | (prop << shiftGraphemePropState)
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
var firstProp int
|
||||
if state < 0 {
|
||||
state, firstProp, _ = transitionGraphemeState(state, r)
|
||||
} else {
|
||||
firstProp = state >> shiftGraphemePropState
|
||||
}
|
||||
width += runeWidth(r, firstProp)
|
||||
|
||||
// Transition until we find a boundary.
|
||||
for {
|
||||
var (
|
||||
prop int
|
||||
boundary bool
|
||||
)
|
||||
|
||||
r, l := utf8.DecodeRuneInString(str[length:])
|
||||
state, prop, boundary = transitionGraphemeState(state&maskGraphemeState, r)
|
||||
|
||||
if boundary {
|
||||
return str[:length], str[length:], width, state | (prop << shiftGraphemePropState)
|
||||
}
|
||||
|
||||
if r == vs16 {
|
||||
width = 2
|
||||
} else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
|
||||
width += runeWidth(r, prop)
|
||||
} else if firstProp == prExtendedPictographic {
|
||||
if r == vs15 {
|
||||
width = 1
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(str) <= length {
|
||||
return str, "", width, grAny | (prop << shiftGraphemePropState)
|
||||
}
|
||||
}
|
||||
}
1891 vendor/github.com/rivo/uniseg/graphemeproperties.go generated vendored Normal file
File diff suppressed because it is too large
138 vendor/github.com/rivo/uniseg/graphemerules.go generated vendored Normal file
@@ -0,0 +1,138 @@
|
||||
package uniseg
|
||||
|
||||
// The states of the grapheme cluster parser.
|
||||
const (
|
||||
grAny = iota
|
||||
grCR
|
||||
grControlLF
|
||||
grL
|
||||
grLVV
|
||||
grLVTT
|
||||
grPrepend
|
||||
grExtendedPictographic
|
||||
grExtendedPictographicZWJ
|
||||
grRIOdd
|
||||
grRIEven
|
||||
)
|
||||
|
||||
// The grapheme cluster parser's breaking instructions.
|
||||
const (
|
||||
grNoBoundary = iota
|
||||
grBoundary
|
||||
)
|
||||
|
||||
// The grapheme cluster parser's state transitions. Maps (state, property) to
|
||||
// (new state, breaking instruction, rule number). The breaking instruction
|
||||
// always refers to the boundary between the last and next code point.
|
||||
//
|
||||
// This map is queried as follows:
|
||||
//
|
||||
// 1. Find specific state + specific property. Stop if found.
|
||||
// 2. Find specific state + any property.
|
||||
// 3. Find any state + specific property.
|
||||
// 4. If only (2) or (3) (but not both) was found, stop.
|
||||
// 5. If both (2) and (3) were found, use state from (3) and breaking instruction
|
||||
// from the transition with the lower rule number, prefer (3) if rule numbers
|
||||
// are equal. Stop.
|
||||
// 6. Assume grAny and grBoundary.
|
||||
//
|
||||
// Unicode version 14.0.0.
|
||||
var grTransitions = map[[2]int][3]int{
|
||||
// GB5
|
||||
{grAny, prCR}: {grCR, grBoundary, 50},
|
||||
{grAny, prLF}: {grControlLF, grBoundary, 50},
|
||||
{grAny, prControl}: {grControlLF, grBoundary, 50},
|
||||
|
||||
// GB4
|
||||
{grCR, prAny}: {grAny, grBoundary, 40},
|
||||
{grControlLF, prAny}: {grAny, grBoundary, 40},
|
||||
|
||||
// GB3.
|
||||
{grCR, prLF}: {grAny, grNoBoundary, 30},
|
||||
|
||||
// GB6.
|
||||
{grAny, prL}: {grL, grBoundary, 9990},
|
||||
{grL, prL}: {grL, grNoBoundary, 60},
|
||||
{grL, prV}: {grLVV, grNoBoundary, 60},
|
||||
{grL, prLV}: {grLVV, grNoBoundary, 60},
|
||||
{grL, prLVT}: {grLVTT, grNoBoundary, 60},
|
||||
|
||||
// GB7.
|
||||
{grAny, prLV}: {grLVV, grBoundary, 9990},
|
||||
{grAny, prV}: {grLVV, grBoundary, 9990},
|
||||
{grLVV, prV}: {grLVV, grNoBoundary, 70},
|
||||
{grLVV, prT}: {grLVTT, grNoBoundary, 70},
|
||||
|
||||
// GB8.
|
||||
{grAny, prLVT}: {grLVTT, grBoundary, 9990},
|
||||
{grAny, prT}: {grLVTT, grBoundary, 9990},
|
||||
{grLVTT, prT}: {grLVTT, grNoBoundary, 80},
|
||||
|
||||
// GB9.
|
||||
{grAny, prExtend}: {grAny, grNoBoundary, 90},
|
||||
{grAny, prZWJ}: {grAny, grNoBoundary, 90},
|
||||
|
||||
// GB9a.
|
||||
{grAny, prSpacingMark}: {grAny, grNoBoundary, 91},
|
||||
|
||||
// GB9b.
|
||||
{grAny, prPrepend}: {grPrepend, grBoundary, 9990},
|
||||
{grPrepend, prAny}: {grAny, grNoBoundary, 92},
|
||||
|
||||
// GB11.
|
||||
{grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990},
|
||||
{grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110},
|
||||
{grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110},
|
||||
{grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110},
|
||||
|
||||
// GB12 / GB13.
|
||||
{grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990},
|
||||
{grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120},
|
||||
{grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120},
|
||||
}
|
||||
|
||||
// transitionGraphemeState determines the new state of the grapheme cluster
|
||||
// parser given the current state and the next code point. It also returns the
|
||||
// code point's grapheme property (the value mapped by the [graphemeCodePoints]
|
||||
// table) and whether a cluster boundary was detected.
|
||||
func transitionGraphemeState(state int, r rune) (newState, prop int, boundary bool) {
|
||||
// Determine the property of the next character.
|
||||
prop = property(graphemeCodePoints, r)
|
||||
|
||||
// Find the applicable transition.
|
||||
transition, ok := grTransitions[[2]int{state, prop}]
|
||||
if ok {
|
||||
// We have a specific transition. We'll use it.
|
||||
return transition[0], prop, transition[1] == grBoundary
|
||||
}
|
||||
|
||||
// No specific transition found. Try the less specific ones.
|
||||
transAnyProp, okAnyProp := grTransitions[[2]int{state, prAny}]
|
||||
transAnyState, okAnyState := grTransitions[[2]int{grAny, prop}]
|
||||
if okAnyProp && okAnyState {
|
||||
// Both apply. We'll use a mix (see comments for grTransitions).
|
||||
newState = transAnyState[0]
|
||||
boundary = transAnyState[1] == grBoundary
|
||||
if transAnyProp[2] < transAnyState[2] {
|
||||
boundary = transAnyProp[1] == grBoundary
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if okAnyProp {
|
||||
// We only have a specific state.
|
||||
return transAnyProp[0], prop, transAnyProp[1] == grBoundary
|
||||
// This branch will probably never be reached because okAnyState will
|
||||
// always be true given the current transition map. But we keep it here
|
||||
// for future modifications to the transition map where this may not be
|
||||
// true anymore.
|
||||
}
|
||||
|
||||
if okAnyState {
|
||||
// We only have a specific property.
|
||||
return transAnyState[0], prop, transAnyState[1] == grBoundary
|
||||
}
|
||||
|
||||
// No known transition. GB999: Any ÷ Any.
|
||||
return grAny, prop, true
|
||||
}
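// Illustrative sketch, not part of the vendored uniseg source: it shows rule
// GB3 (CR x LF) resolving through the specific {grCR, prLF} entry above; the
// function name is hypothetical.
func exampleCRLF() bool {
	state, _, _ := transitionGraphemeState(-1, '\r') // Unknown state, first code point.
	_, _, boundary := transitionGraphemeState(state, '\n')
	return boundary // false: CR followed by LF is a single grapheme cluster.
}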
131 vendor/github.com/rivo/uniseg/line.go generated vendored Normal file
@@ -0,0 +1,131 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// FirstLineSegment returns the prefix of the given byte slice after which a
|
||||
// decision to break the string over to the next line can or must be made,
|
||||
// according to the rules of Unicode Standard Annex #14. This is used to
|
||||
// implement line breaking.
|
||||
//
|
||||
// Line breaking, also known as word wrapping, is the process of breaking a
|
||||
// section of text into lines such that it will fit in the available width of a
|
||||
// page, window or other display area.
|
||||
//
|
||||
// The returned "segment" may not be broken into smaller parts, unless no other
|
||||
// breaking opportunities present themselves, in which case you may break by
|
||||
// grapheme clusters (using the [FirstGraphemeCluster] function to determine the
|
||||
// grapheme clusters).
|
||||
//
|
||||
// The "mustBreak" flag indicates whether you MUST break the line after the
|
||||
// given segment (true), for example after newline characters, or you MAY break
|
||||
// the line after the given segment (false).
|
||||
//
|
||||
// This function can be called continuously to extract all non-breaking sub-sets
|
||||
// from a byte slice, as illustrated in the example below.
|
||||
//
|
||||
// If you don't know the current state, for example when calling the function
|
||||
// for the first time, you must pass -1. For consecutive calls, pass the state
|
||||
// and rest slice returned by the previous call.
|
||||
//
|
||||
// The "rest" slice is the sub-slice of the original byte slice "b" starting
|
||||
// after the last byte of the identified line segment. If the length of the
|
||||
// "rest" slice is 0, the entire byte slice "b" has been processed. The
|
||||
// "segment" byte slice is the sub-slice of the input slice containing the
|
||||
// identified line segment.
|
||||
//
|
||||
// Given an empty byte slice "b", the function returns nil values.
|
||||
//
|
||||
// Note that in accordance with UAX #14 LB3, the final segment will end with
|
||||
// "mustBreak" set to true. You can choose to ignore this by checking if the
|
||||
// length of the "rest" slice is 0 and calling [HasTrailingLineBreak] or
|
||||
// [HasTrailingLineBreakInString] on the last rune.
|
||||
//
|
||||
// Note also that this algorithm may break within grapheme clusters. This is
|
||||
// addressed in Section 8.2 Example 6 of UAX #14. To avoid this, you can use
|
||||
// the [Step] function instead.
|
||||
func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool, newState int) {
|
||||
// An empty byte slice returns nothing.
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRune(b)
|
||||
if len(b) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return b, nil, true, lbAny // LB3.
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionLineBreakState(state, r, b[length:], "")
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary int
|
||||
for {
|
||||
r, l := utf8.DecodeRune(b[length:])
|
||||
state, boundary = transitionLineBreakState(state, r, b[length+l:], "")
|
||||
|
||||
if boundary != LineDontBreak {
|
||||
return b[:length], b[length:], boundary == LineMustBreak, state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(b) <= length {
|
||||
return b, nil, true, lbAny // LB3
|
||||
}
|
||||
}
|
||||
}
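// Illustrative sketch, not part of the vendored uniseg source: a very naive
// word wrapper on top of FirstLineSegment. The function name is hypothetical,
// and a real caller would measure rendered widths rather than byte lengths.
func exampleWrap(text []byte, maxBytes int) (lines [][]byte) {
	var line []byte
	state := -1
	for len(text) > 0 {
		var segment []byte
		var mustBreak bool
		segment, text, mustBreak, state = FirstLineSegment(text, state)
		line = append(line, segment...)
		if mustBreak || len(line) >= maxBytes {
			lines = append(lines, line)
			line = nil
		}
	}
	if len(line) > 0 {
		lines = append(lines, line)
	}
	return lines
}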
|
||||
|
||||
// FirstLineSegmentInString is like FirstLineSegment() but its input and outputs
|
||||
// are strings.
|
||||
func FirstLineSegmentInString(str string, state int) (segment, rest string, mustBreak bool, newState int) {
|
||||
// An empty string returns nothing.
|
||||
if len(str) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRuneInString(str)
|
||||
if len(str) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return str, "", true, lbAny // LB3.
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionLineBreakState(state, r, nil, str[length:])
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary int
|
||||
for {
|
||||
r, l := utf8.DecodeRuneInString(str[length:])
|
||||
state, boundary = transitionLineBreakState(state, r, nil, str[length+l:])
|
||||
|
||||
if boundary != LineDontBreak {
|
||||
return str[:length], str[length:], boundary == LineMustBreak, state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(str) <= length {
|
||||
return str, "", true, lbAny // LB3.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HasTrailingLineBreak returns true if the last rune in the given byte slice is
// one of the hard line break code points defined in LB4 and LB5 of [UAX #14].
//
// [UAX #14]: https://www.unicode.org/reports/tr14/#Algorithm
func HasTrailingLineBreak(b []byte) bool {
|
||||
r, _ := utf8.DecodeLastRune(b)
|
||||
property, _ := propertyWithGenCat(lineBreakCodePoints, r)
|
||||
return property == lbBK || property == lbCR || property == lbLF || property == lbNL
|
||||
}
|
||||
|
||||
// HasTrailingLineBreakInString is like [HasTrailingLineBreak] but for a string.
|
||||
func HasTrailingLineBreakInString(str string) bool {
|
||||
r, _ := utf8.DecodeLastRuneInString(str)
|
||||
property, _ := propertyWithGenCat(lineBreakCodePoints, r)
|
||||
return property == lbBK || property == lbCR || property == lbLF || property == lbNL
|
||||
}
|
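// Sketch of the LB3 caveat noted in the FirstLineSegment doc comment: a minimal
// way to ignore the mandatory break reported for the final segment unless the
// text really ends in a hard line break rune. Assumes a standalone program
// importing the vendored package; the input string is made up.
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	str := "No trailing newline here"
	state := -1
	for len(str) > 0 {
		var segment string
		var mustBreak bool
		segment, str, mustBreak, state = uniseg.FirstLineSegmentInString(str, state)
		if mustBreak && len(str) == 0 && !uniseg.HasTrailingLineBreakInString(segment) {
			// Final segment: LB3 forces mustBreak, but there is no hard break rune.
			mustBreak = false
		}
		fmt.Printf("%q mustBreak=%v\n", segment, mustBreak)
	}
}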
3513
vendor/github.com/rivo/uniseg/lineproperties.go
generated
vendored
Normal file
3513
vendor/github.com/rivo/uniseg/lineproperties.go
generated
vendored
Normal file
File diff suppressed because it is too large
470
vendor/github.com/rivo/uniseg/linerules.go
generated
vendored
Normal file
470
vendor/github.com/rivo/uniseg/linerules.go
generated
vendored
Normal file
@ -0,0 +1,470 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// The states of the line break parser.
|
||||
const (
|
||||
lbAny = iota
|
||||
lbBK
|
||||
lbCR
|
||||
lbLF
|
||||
lbNL
|
||||
lbSP
|
||||
lbZW
|
||||
lbWJ
|
||||
lbGL
|
||||
lbBA
|
||||
lbHY
|
||||
lbCL
|
||||
lbCP
|
||||
lbEX
|
||||
lbIS
|
||||
lbSY
|
||||
lbOP
|
||||
lbQU
|
||||
lbQUSP
|
||||
lbNS
|
||||
lbCLCPSP
|
||||
lbB2
|
||||
lbB2SP
|
||||
lbCB
|
||||
lbBB
|
||||
lbLB21a
|
||||
lbHL
|
||||
lbAL
|
||||
lbNU
|
||||
lbPR
|
||||
lbEB
|
||||
lbIDEM
|
||||
lbNUNU
|
||||
lbNUSY
|
||||
lbNUIS
|
||||
lbNUCL
|
||||
lbNUCP
|
||||
lbPO
|
||||
lbJL
|
||||
lbJV
|
||||
lbJT
|
||||
lbH2
|
||||
lbH3
|
||||
lbOddRI
|
||||
lbEvenRI
|
||||
lbExtPicCn
|
||||
lbZWJBit = 64
|
||||
lbCPeaFWHBit = 128
|
||||
)
|
||||
|
||||
// These constants define whether a given text may be broken into the next line.
// If the break is optional (LineCanBreak), you may choose to break or not based
// on your own criteria, for example, if the text has reached the available
// width.
const (
|
||||
LineDontBreak = iota // You may not break the line here.
|
||||
LineCanBreak // You may or may not break the line here.
|
||||
LineMustBreak // You must break the line here.
|
||||
)
|
||||
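// Hypothetical helper showing how the three constants above are typically
// consumed, for example with the line-break bits that Step reports: a mandatory
// break always wraps, an optional break wraps only once the current line has
// used up the available width. The names shouldWrap, lineWidth, and maxWidth
// are assumptions, not part of this package.
package wrapdemo

import "github.com/rivo/uniseg"

func shouldWrap(lineBreak, lineWidth, maxWidth int) bool {
	switch lineBreak {
	case uniseg.LineMustBreak:
		return true
	case uniseg.LineCanBreak:
		return lineWidth >= maxWidth // Optional break: wrap only when out of space.
	default: // uniseg.LineDontBreak
		return false
	}
}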
|
||||
// The line break parser's state transitions. It's analogous to grTransitions,
// see comments there for details. Unicode version 14.0.0.
var lbTransitions = map[[2]int][3]int{
|
||||
// LB4.
|
||||
{lbAny, prBK}: {lbBK, LineCanBreak, 310},
|
||||
{lbBK, prAny}: {lbAny, LineMustBreak, 40},
|
||||
|
||||
// LB5.
|
||||
{lbAny, prCR}: {lbCR, LineCanBreak, 310},
|
||||
{lbAny, prLF}: {lbLF, LineCanBreak, 310},
|
||||
{lbAny, prNL}: {lbNL, LineCanBreak, 310},
|
||||
{lbCR, prLF}: {lbLF, LineDontBreak, 50},
|
||||
{lbCR, prAny}: {lbAny, LineMustBreak, 50},
|
||||
{lbLF, prAny}: {lbAny, LineMustBreak, 50},
|
||||
{lbNL, prAny}: {lbAny, LineMustBreak, 50},
|
||||
|
||||
// LB6.
|
||||
{lbAny, prBK}: {lbBK, LineDontBreak, 60},
|
||||
{lbAny, prCR}: {lbCR, LineDontBreak, 60},
|
||||
{lbAny, prLF}: {lbLF, LineDontBreak, 60},
|
||||
{lbAny, prNL}: {lbNL, LineDontBreak, 60},
|
||||
|
||||
// LB7.
|
||||
{lbAny, prSP}: {lbSP, LineDontBreak, 70},
|
||||
{lbAny, prZW}: {lbZW, LineDontBreak, 70},
|
||||
|
||||
// LB8.
|
||||
{lbZW, prSP}: {lbZW, LineDontBreak, 70},
|
||||
{lbZW, prAny}: {lbAny, LineCanBreak, 80},
|
||||
|
||||
// LB11.
|
||||
{lbAny, prWJ}: {lbWJ, LineDontBreak, 110},
|
||||
{lbWJ, prAny}: {lbAny, LineDontBreak, 110},
|
||||
|
||||
// LB12.
|
||||
{lbAny, prGL}: {lbGL, LineCanBreak, 310},
|
||||
{lbGL, prAny}: {lbAny, LineDontBreak, 120},
|
||||
|
||||
// LB13 (simple transitions).
|
||||
{lbAny, prCL}: {lbCL, LineCanBreak, 310},
|
||||
{lbAny, prCP}: {lbCP, LineCanBreak, 310},
|
||||
{lbAny, prEX}: {lbEX, LineDontBreak, 130},
|
||||
{lbAny, prIS}: {lbIS, LineCanBreak, 310},
|
||||
{lbAny, prSY}: {lbSY, LineCanBreak, 310},
|
||||
|
||||
// LB14.
|
||||
{lbAny, prOP}: {lbOP, LineCanBreak, 310},
|
||||
{lbOP, prSP}: {lbOP, LineDontBreak, 70},
|
||||
{lbOP, prAny}: {lbAny, LineDontBreak, 140},
|
||||
|
||||
// LB15.
|
||||
{lbQU, prSP}: {lbQUSP, LineDontBreak, 70},
|
||||
{lbQU, prOP}: {lbOP, LineDontBreak, 150},
|
||||
{lbQUSP, prOP}: {lbOP, LineDontBreak, 150},
|
||||
|
||||
// LB16.
|
||||
{lbCL, prSP}: {lbCLCPSP, LineDontBreak, 70},
|
||||
{lbNUCL, prSP}: {lbCLCPSP, LineDontBreak, 70},
|
||||
{lbCP, prSP}: {lbCLCPSP, LineDontBreak, 70},
|
||||
{lbNUCP, prSP}: {lbCLCPSP, LineDontBreak, 70},
|
||||
{lbCL, prNS}: {lbNS, LineDontBreak, 160},
|
||||
{lbNUCL, prNS}: {lbNS, LineDontBreak, 160},
|
||||
{lbCP, prNS}: {lbNS, LineDontBreak, 160},
|
||||
{lbNUCP, prNS}: {lbNS, LineDontBreak, 160},
|
||||
{lbCLCPSP, prNS}: {lbNS, LineDontBreak, 160},
|
||||
|
||||
// LB17.
|
||||
{lbAny, prB2}: {lbB2, LineCanBreak, 310},
|
||||
{lbB2, prSP}: {lbB2SP, LineDontBreak, 70},
|
||||
{lbB2, prB2}: {lbB2, LineDontBreak, 170},
|
||||
{lbB2SP, prB2}: {lbB2, LineDontBreak, 170},
|
||||
|
||||
// LB18.
|
||||
{lbSP, prAny}: {lbAny, LineCanBreak, 180},
|
||||
{lbQUSP, prAny}: {lbAny, LineCanBreak, 180},
|
||||
{lbCLCPSP, prAny}: {lbAny, LineCanBreak, 180},
|
||||
{lbB2SP, prAny}: {lbAny, LineCanBreak, 180},
|
||||
|
||||
// LB19.
|
||||
{lbAny, prQU}: {lbQU, LineDontBreak, 190},
|
||||
{lbQU, prAny}: {lbAny, LineDontBreak, 190},
|
||||
|
||||
// LB20.
|
||||
{lbAny, prCB}: {lbCB, LineCanBreak, 200},
|
||||
{lbCB, prAny}: {lbAny, LineCanBreak, 200},
|
||||
|
||||
// LB21.
|
||||
{lbAny, prBA}: {lbBA, LineDontBreak, 210},
|
||||
{lbAny, prHY}: {lbHY, LineDontBreak, 210},
|
||||
{lbAny, prNS}: {lbNS, LineDontBreak, 210},
|
||||
{lbAny, prBB}: {lbBB, LineCanBreak, 310},
|
||||
{lbBB, prAny}: {lbAny, LineDontBreak, 210},
|
||||
|
||||
// LB21a.
|
||||
{lbAny, prHL}: {lbHL, LineCanBreak, 310},
|
||||
{lbHL, prHY}: {lbLB21a, LineDontBreak, 210},
|
||||
{lbHL, prBA}: {lbLB21a, LineDontBreak, 210},
|
||||
{lbLB21a, prAny}: {lbAny, LineDontBreak, 211},
|
||||
|
||||
// LB21b.
|
||||
{lbSY, prHL}: {lbHL, LineDontBreak, 212},
|
||||
{lbNUSY, prHL}: {lbHL, LineDontBreak, 212},
|
||||
|
||||
// LB22.
|
||||
{lbAny, prIN}: {lbAny, LineDontBreak, 220},
|
||||
|
||||
// LB23.
|
||||
{lbAny, prAL}: {lbAL, LineCanBreak, 310},
|
||||
{lbAny, prNU}: {lbNU, LineCanBreak, 310},
|
||||
{lbAL, prNU}: {lbNU, LineDontBreak, 230},
|
||||
{lbHL, prNU}: {lbNU, LineDontBreak, 230},
|
||||
{lbNU, prAL}: {lbAL, LineDontBreak, 230},
|
||||
{lbNU, prHL}: {lbHL, LineDontBreak, 230},
|
||||
{lbNUNU, prAL}: {lbAL, LineDontBreak, 230},
|
||||
{lbNUNU, prHL}: {lbHL, LineDontBreak, 230},
|
||||
|
||||
// LB23a.
|
||||
{lbAny, prPR}: {lbPR, LineCanBreak, 310},
|
||||
{lbAny, prID}: {lbIDEM, LineCanBreak, 310},
|
||||
{lbAny, prEB}: {lbEB, LineCanBreak, 310},
|
||||
{lbAny, prEM}: {lbIDEM, LineCanBreak, 310},
|
||||
{lbPR, prID}: {lbIDEM, LineDontBreak, 231},
|
||||
{lbPR, prEB}: {lbEB, LineDontBreak, 231},
|
||||
{lbPR, prEM}: {lbIDEM, LineDontBreak, 231},
|
||||
{lbIDEM, prPO}: {lbPO, LineDontBreak, 231},
|
||||
{lbEB, prPO}: {lbPO, LineDontBreak, 231},
|
||||
|
||||
// LB24.
|
||||
{lbAny, prPO}: {lbPO, LineCanBreak, 310},
|
||||
{lbPR, prAL}: {lbAL, LineDontBreak, 240},
|
||||
{lbPR, prHL}: {lbHL, LineDontBreak, 240},
|
||||
{lbPO, prAL}: {lbAL, LineDontBreak, 240},
|
||||
{lbPO, prHL}: {lbHL, LineDontBreak, 240},
|
||||
{lbAL, prPR}: {lbPR, LineDontBreak, 240},
|
||||
{lbAL, prPO}: {lbPO, LineDontBreak, 240},
|
||||
{lbHL, prPR}: {lbPR, LineDontBreak, 240},
|
||||
{lbHL, prPO}: {lbPO, LineDontBreak, 240},
|
||||
|
||||
// LB25 (simple transitions).
|
||||
{lbPR, prNU}: {lbNU, LineDontBreak, 250},
|
||||
{lbPO, prNU}: {lbNU, LineDontBreak, 250},
|
||||
{lbOP, prNU}: {lbNU, LineDontBreak, 250},
|
||||
{lbHY, prNU}: {lbNU, LineDontBreak, 250},
|
||||
{lbNU, prNU}: {lbNUNU, LineDontBreak, 250},
|
||||
{lbNU, prSY}: {lbNUSY, LineDontBreak, 250},
|
||||
{lbNU, prIS}: {lbNUIS, LineDontBreak, 250},
|
||||
{lbNUNU, prNU}: {lbNUNU, LineDontBreak, 250},
|
||||
{lbNUNU, prSY}: {lbNUSY, LineDontBreak, 250},
|
||||
{lbNUNU, prIS}: {lbNUIS, LineDontBreak, 250},
|
||||
{lbNUSY, prNU}: {lbNUNU, LineDontBreak, 250},
|
||||
{lbNUSY, prSY}: {lbNUSY, LineDontBreak, 250},
|
||||
{lbNUSY, prIS}: {lbNUIS, LineDontBreak, 250},
|
||||
{lbNUIS, prNU}: {lbNUNU, LineDontBreak, 250},
|
||||
{lbNUIS, prSY}: {lbNUSY, LineDontBreak, 250},
|
||||
{lbNUIS, prIS}: {lbNUIS, LineDontBreak, 250},
|
||||
{lbNU, prCL}: {lbNUCL, LineDontBreak, 250},
|
||||
{lbNU, prCP}: {lbNUCP, LineDontBreak, 250},
|
||||
{lbNUNU, prCL}: {lbNUCL, LineDontBreak, 250},
|
||||
{lbNUNU, prCP}: {lbNUCP, LineDontBreak, 250},
|
||||
{lbNUSY, prCL}: {lbNUCL, LineDontBreak, 250},
|
||||
{lbNUSY, prCP}: {lbNUCP, LineDontBreak, 250},
|
||||
{lbNUIS, prCL}: {lbNUCL, LineDontBreak, 250},
|
||||
{lbNUIS, prCP}: {lbNUCP, LineDontBreak, 250},
|
||||
{lbNU, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNUNU, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNUSY, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNUIS, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNUCL, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNUCP, prPO}: {lbPO, LineDontBreak, 250},
|
||||
{lbNU, prPR}: {lbPR, LineDontBreak, 250},
|
||||
{lbNUNU, prPR}: {lbPR, LineDontBreak, 250},
|
||||
{lbNUSY, prPR}: {lbPR, LineDontBreak, 250},
|
||||
{lbNUIS, prPR}: {lbPR, LineDontBreak, 250},
|
||||
{lbNUCL, prPR}: {lbPR, LineDontBreak, 250},
|
||||
{lbNUCP, prPR}: {lbPR, LineDontBreak, 250},
|
||||
|
||||
// LB26.
|
||||
{lbAny, prJL}: {lbJL, LineCanBreak, 310},
|
||||
{lbAny, prJV}: {lbJV, LineCanBreak, 310},
|
||||
{lbAny, prJT}: {lbJT, LineCanBreak, 310},
|
||||
{lbAny, prH2}: {lbH2, LineCanBreak, 310},
|
||||
{lbAny, prH3}: {lbH3, LineCanBreak, 310},
|
||||
{lbJL, prJL}: {lbJL, LineDontBreak, 260},
|
||||
{lbJL, prJV}: {lbJV, LineDontBreak, 260},
|
||||
{lbJL, prH2}: {lbH2, LineDontBreak, 260},
|
||||
{lbJL, prH3}: {lbH3, LineDontBreak, 260},
|
||||
{lbJV, prJV}: {lbJV, LineDontBreak, 260},
|
||||
{lbJV, prJT}: {lbJT, LineDontBreak, 260},
|
||||
{lbH2, prJV}: {lbJV, LineDontBreak, 260},
|
||||
{lbH2, prJT}: {lbJT, LineDontBreak, 260},
|
||||
{lbJT, prJT}: {lbJT, LineDontBreak, 260},
|
||||
{lbH3, prJT}: {lbJT, LineDontBreak, 260},
|
||||
|
||||
// LB27.
|
||||
{lbJL, prPO}: {lbPO, LineDontBreak, 270},
|
||||
{lbJV, prPO}: {lbPO, LineDontBreak, 270},
|
||||
{lbJT, prPO}: {lbPO, LineDontBreak, 270},
|
||||
{lbH2, prPO}: {lbPO, LineDontBreak, 270},
|
||||
{lbH3, prPO}: {lbPO, LineDontBreak, 270},
|
||||
{lbPR, prJL}: {lbJL, LineDontBreak, 270},
|
||||
{lbPR, prJV}: {lbJV, LineDontBreak, 270},
|
||||
{lbPR, prJT}: {lbJT, LineDontBreak, 270},
|
||||
{lbPR, prH2}: {lbH2, LineDontBreak, 270},
|
||||
{lbPR, prH3}: {lbH3, LineDontBreak, 270},
|
||||
|
||||
// LB28.
|
||||
{lbAL, prAL}: {lbAL, LineDontBreak, 280},
|
||||
{lbAL, prHL}: {lbHL, LineDontBreak, 280},
|
||||
{lbHL, prAL}: {lbAL, LineDontBreak, 280},
|
||||
{lbHL, prHL}: {lbHL, LineDontBreak, 280},
|
||||
|
||||
// LB29.
|
||||
{lbIS, prAL}: {lbAL, LineDontBreak, 290},
|
||||
{lbIS, prHL}: {lbHL, LineDontBreak, 290},
|
||||
{lbNUIS, prAL}: {lbAL, LineDontBreak, 290},
|
||||
{lbNUIS, prHL}: {lbHL, LineDontBreak, 290},
|
||||
}
|
||||
|
||||
// transitionLineBreakState determines the new state of the line break parser
// given the current state and the next code point. It also returns the type of
// line break: LineDontBreak, LineCanBreak, or LineMustBreak. If more than one
// code point is needed to determine the new state, the byte slice or the string
// starting after rune "r" can be used (whichever is not nil or empty) for
// further lookups.
func transitionLineBreakState(state int, r rune, b []byte, str string) (newState int, lineBreak int) {
|
||||
// Determine the property of the next character.
|
||||
nextProperty, generalCategory := propertyWithGenCat(lineBreakCodePoints, r)
|
||||
|
||||
// Prepare.
|
||||
var forceNoBreak, isCPeaFWH bool
|
||||
if state >= 0 && state&lbCPeaFWHBit != 0 {
|
||||
isCPeaFWH = true // LB30: CP but ea is not F, W, or H.
|
||||
state = state &^ lbCPeaFWHBit
|
||||
}
|
||||
if state >= 0 && state&lbZWJBit != 0 {
|
||||
state = state &^ lbZWJBit // Extract zero-width joiner bit.
|
||||
forceNoBreak = true // LB8a.
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Transition into LB30.
|
||||
if newState == lbCP || newState == lbNUCP {
|
||||
ea := property(eastAsianWidth, r)
|
||||
if ea != prF && ea != prW && ea != prH {
|
||||
newState |= lbCPeaFWHBit
|
||||
}
|
||||
}
|
||||
|
||||
// Override break.
|
||||
if forceNoBreak {
|
||||
lineBreak = LineDontBreak
|
||||
}
|
||||
}()
|
||||
|
||||
// LB1.
|
||||
if nextProperty == prAI || nextProperty == prSG || nextProperty == prXX {
|
||||
nextProperty = prAL
|
||||
} else if nextProperty == prSA {
|
||||
if generalCategory == gcMn || generalCategory == gcMc {
|
||||
nextProperty = prCM
|
||||
} else {
|
||||
nextProperty = prAL
|
||||
}
|
||||
} else if nextProperty == prCJ {
|
||||
nextProperty = prNS
|
||||
}
|
||||
|
||||
// Combining marks.
|
||||
if nextProperty == prZWJ || nextProperty == prCM {
|
||||
var bit int
|
||||
if nextProperty == prZWJ {
|
||||
bit = lbZWJBit
|
||||
}
|
||||
mustBreakState := state < 0 || state == lbBK || state == lbCR || state == lbLF || state == lbNL
|
||||
if !mustBreakState && state != lbSP && state != lbZW && state != lbQUSP && state != lbCLCPSP && state != lbB2SP {
|
||||
// LB9.
|
||||
return state | bit, LineDontBreak
|
||||
} else {
|
||||
// LB10.
|
||||
if mustBreakState {
|
||||
return lbAL | bit, LineMustBreak
|
||||
}
|
||||
return lbAL | bit, LineCanBreak
|
||||
}
|
||||
}
|
||||
|
||||
// Find the applicable transition in the table.
|
||||
var rule int
|
||||
transition, ok := lbTransitions[[2]int{state, nextProperty}]
|
||||
if ok {
|
||||
// We have a specific transition. We'll use it.
|
||||
newState, lineBreak, rule = transition[0], transition[1], transition[2]
|
||||
} else {
|
||||
// No specific transition found. Try the less specific ones.
|
||||
transAnyProp, okAnyProp := lbTransitions[[2]int{state, prAny}]
|
||||
transAnyState, okAnyState := lbTransitions[[2]int{lbAny, nextProperty}]
|
||||
if okAnyProp && okAnyState {
|
||||
// Both apply. We'll use a mix (see comments for grTransitions).
|
||||
newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2]
|
||||
if transAnyProp[2] < transAnyState[2] {
|
||||
lineBreak, rule = transAnyProp[1], transAnyProp[2]
|
||||
}
|
||||
} else if okAnyProp {
|
||||
// We only have a specific state.
|
||||
newState, lineBreak, rule = transAnyProp[0], transAnyProp[1], transAnyProp[2]
|
||||
// This branch will probably never be reached because okAnyState will
|
||||
// always be true given the current transition map. But we keep it here
|
||||
// for future modifications to the transition map where this may not be
|
||||
// true anymore.
|
||||
} else if okAnyState {
|
||||
// We only have a specific property.
|
||||
newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2]
|
||||
} else {
|
||||
// No known transition. LB31: ALL ÷ ALL.
|
||||
newState, lineBreak, rule = lbAny, LineCanBreak, 310
|
||||
}
|
||||
}
|
||||
|
||||
// LB12a.
|
||||
if rule > 121 &&
|
||||
nextProperty == prGL &&
|
||||
(state != lbSP && state != lbBA && state != lbHY && state != lbLB21a && state != lbQUSP && state != lbCLCPSP && state != lbB2SP) {
|
||||
return lbGL, LineDontBreak
|
||||
}
|
||||
|
||||
// LB13.
|
||||
if rule > 130 && state != lbNU && state != lbNUNU {
|
||||
switch nextProperty {
|
||||
case prCL:
|
||||
return lbCL, LineDontBreak
|
||||
case prCP:
|
||||
return lbCP, LineDontBreak
|
||||
case prIS:
|
||||
return lbIS, LineDontBreak
|
||||
case prSY:
|
||||
return lbSY, LineDontBreak
|
||||
}
|
||||
}
|
||||
|
||||
// LB25 (look ahead).
|
||||
if rule > 250 &&
|
||||
(state == lbPR || state == lbPO) &&
|
||||
nextProperty == prOP || nextProperty == prHY {
|
||||
var r rune
|
||||
if b != nil { // Byte slice version.
|
||||
r, _ = utf8.DecodeRune(b)
|
||||
} else { // String version.
|
||||
r, _ = utf8.DecodeRuneInString(str)
|
||||
}
|
||||
if r != utf8.RuneError {
|
||||
pr, _ := propertyWithGenCat(lineBreakCodePoints, r)
|
||||
if pr == prNU {
|
||||
return lbNU, LineDontBreak
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LB30 (part one).
|
||||
if rule > 300 {
|
||||
if (state == lbAL || state == lbHL || state == lbNU || state == lbNUNU) && nextProperty == prOP {
|
||||
ea := property(eastAsianWidth, r)
|
||||
if ea != prF && ea != prW && ea != prH {
|
||||
return lbOP, LineDontBreak
|
||||
}
|
||||
} else if isCPeaFWH {
|
||||
switch nextProperty {
|
||||
case prAL:
|
||||
return lbAL, LineDontBreak
|
||||
case prHL:
|
||||
return lbHL, LineDontBreak
|
||||
case prNU:
|
||||
return lbNU, LineDontBreak
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LB30a.
|
||||
if newState == lbAny && nextProperty == prRI {
|
||||
if state != lbOddRI && state != lbEvenRI { // Includes state == -1.
|
||||
// Transition into the first RI.
|
||||
return lbOddRI, lineBreak
|
||||
}
|
||||
if state == lbOddRI {
|
||||
// Don't break pairs of Regional Indicators.
|
||||
return lbEvenRI, LineDontBreak
|
||||
}
|
||||
return lbOddRI, lineBreak
|
||||
}
|
||||
|
||||
// LB30b.
|
||||
if rule > 302 {
|
||||
if nextProperty == prEM {
|
||||
if state == lbEB || state == lbExtPicCn {
|
||||
return prAny, LineDontBreak
|
||||
}
|
||||
}
|
||||
graphemeProperty := property(graphemeCodePoints, r)
|
||||
if graphemeProperty == prExtendedPictographic && generalCategory == gcCn {
|
||||
return lbExtPicCn, LineCanBreak
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
1760
vendor/github.com/rivo/uniseg/properties.go
generated
vendored
1760
vendor/github.com/rivo/uniseg/properties.go
generated
vendored
File diff suppressed because it is too large
88
vendor/github.com/rivo/uniseg/sentence.go
generated
vendored
Normal file
88
vendor/github.com/rivo/uniseg/sentence.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// FirstSentence returns the first sentence found in the given byte slice
// according to the rules of Unicode Standard Annex #29, Sentence Boundaries.
// This function can be called continuously to extract all sentences from a byte
// slice, as illustrated in the example below.
//
// If you don't know the current state, for example when calling the function
// for the first time, you must pass -1. For consecutive calls, pass the state
// and rest slice returned by the previous call.
//
// The "rest" slice is the sub-slice of the original byte slice "b" starting
// after the last byte of the identified sentence. If the length of the "rest"
// slice is 0, the entire byte slice "b" has been processed. The "sentence" byte
// slice is the sub-slice of the input slice containing the identified sentence.
//
// Given an empty byte slice "b", the function returns nil values.
func FirstSentence(b []byte, state int) (sentence, rest []byte, newState int) {
|
||||
// An empty byte slice returns nothing.
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRune(b)
|
||||
if len(b) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return b, nil, sbAny
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionSentenceBreakState(state, r, b[length:], "")
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary bool
|
||||
for {
|
||||
r, l := utf8.DecodeRune(b[length:])
|
||||
state, boundary = transitionSentenceBreakState(state, r, b[length+l:], "")
|
||||
|
||||
if boundary {
|
||||
return b[:length], b[length:], state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(b) <= length {
|
||||
return b, nil, sbAny
|
||||
}
|
||||
}
|
||||
}
|
||||
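// Minimal sketch of extracting all sentences with FirstSentence, following the
// doc comment above (initial state -1, then feed back state and rest). Assumes
// a standalone program importing the vendored package; the sample text is made up.
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("Hello, world. Second sentence!")
	state := -1
	for len(b) > 0 {
		var sentence []byte
		sentence, b, state = uniseg.FirstSentence(b, state)
		fmt.Printf("%q\n", sentence)
	}
}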
|
||||
// FirstSentenceInString is like [FirstSentence] but its input and outputs are
|
||||
// strings.
|
||||
func FirstSentenceInString(str string, state int) (sentence, rest string, newState int) {
|
||||
// An empty string returns nothing.
|
||||
if len(str) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRuneInString(str)
|
||||
if len(str) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return str, "", sbAny
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionSentenceBreakState(state, r, nil, str[length:])
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary bool
|
||||
for {
|
||||
r, l := utf8.DecodeRuneInString(str[length:])
|
||||
state, boundary = transitionSentenceBreakState(state, r, nil, str[length+l:])
|
||||
|
||||
if boundary {
|
||||
return str[:length], str[length:], state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(str) <= length {
|
||||
return str, "", sbAny
|
||||
}
|
||||
}
|
||||
}
|
2815
vendor/github.com/rivo/uniseg/sentenceproperties.go
generated
vendored
Normal file
2815
vendor/github.com/rivo/uniseg/sentenceproperties.go
generated
vendored
Normal file
File diff suppressed because it is too large
205
vendor/github.com/rivo/uniseg/sentencerules.go
generated
vendored
Normal file
205
vendor/github.com/rivo/uniseg/sentencerules.go
generated
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// The states of the sentence break parser.
|
||||
const (
|
||||
sbAny = iota
|
||||
sbCR
|
||||
sbParaSep
|
||||
sbATerm
|
||||
sbUpper
|
||||
sbLower
|
||||
sbSB7
|
||||
sbSB8Close
|
||||
sbSB8Sp
|
||||
sbSTerm
|
||||
sbSB8aClose
|
||||
sbSB8aSp
|
||||
)
|
||||
|
||||
// The sentence break parser's breaking instructions.
|
||||
const (
|
||||
sbDontBreak = iota
|
||||
sbBreak
|
||||
)
|
||||
|
||||
// The sentence break parser's state transitions. It's analogous to
// grTransitions, see comments there for details. Unicode version 14.0.0.
var sbTransitions = map[[2]int][3]int{
|
||||
// SB3.
|
||||
{sbAny, prCR}: {sbCR, sbDontBreak, 9990},
|
||||
{sbCR, prLF}: {sbParaSep, sbDontBreak, 30},
|
||||
|
||||
// SB4.
|
||||
{sbAny, prSep}: {sbParaSep, sbDontBreak, 9990},
|
||||
{sbAny, prLF}: {sbParaSep, sbDontBreak, 9990},
|
||||
{sbParaSep, prAny}: {sbAny, sbBreak, 40},
|
||||
{sbCR, prAny}: {sbAny, sbBreak, 40},
|
||||
|
||||
// SB6.
|
||||
{sbAny, prATerm}: {sbATerm, sbDontBreak, 9990},
|
||||
{sbATerm, prNumeric}: {sbAny, sbDontBreak, 60},
|
||||
{sbSB7, prNumeric}: {sbAny, sbDontBreak, 60}, // Because ATerm also appears in SB7.
|
||||
|
||||
// SB7.
|
||||
{sbAny, prUpper}: {sbUpper, sbDontBreak, 9990},
|
||||
{sbAny, prLower}: {sbLower, sbDontBreak, 9990},
|
||||
{sbUpper, prATerm}: {sbSB7, sbDontBreak, 70},
|
||||
{sbLower, prATerm}: {sbSB7, sbDontBreak, 70},
|
||||
{sbSB7, prUpper}: {sbUpper, sbDontBreak, 70},
|
||||
|
||||
// SB8a.
|
||||
{sbAny, prSTerm}: {sbSTerm, sbDontBreak, 9990},
|
||||
{sbATerm, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbATerm, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbATerm, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSB7, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSB7, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSB7, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSB8Close, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSB8Close, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSB8Close, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSB8Sp, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSB8Sp, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSB8Sp, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSTerm, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSTerm, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSTerm, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSB8aClose, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSB8aClose, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSB8aClose, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
{sbSB8aSp, prSContinue}: {sbAny, sbDontBreak, 81},
|
||||
{sbSB8aSp, prATerm}: {sbATerm, sbDontBreak, 81},
|
||||
{sbSB8aSp, prSTerm}: {sbSTerm, sbDontBreak, 81},
|
||||
|
||||
// SB9.
|
||||
{sbATerm, prClose}: {sbSB8Close, sbDontBreak, 90},
|
||||
{sbSB7, prClose}: {sbSB8Close, sbDontBreak, 90},
|
||||
{sbSB8Close, prClose}: {sbSB8Close, sbDontBreak, 90},
|
||||
{sbATerm, prSp}: {sbSB8Sp, sbDontBreak, 90},
|
||||
{sbSB7, prSp}: {sbSB8Sp, sbDontBreak, 90},
|
||||
{sbSB8Close, prSp}: {sbSB8Sp, sbDontBreak, 90},
|
||||
{sbSTerm, prClose}: {sbSB8aClose, sbDontBreak, 90},
|
||||
{sbSB8aClose, prClose}: {sbSB8aClose, sbDontBreak, 90},
|
||||
{sbSTerm, prSp}: {sbSB8aSp, sbDontBreak, 90},
|
||||
{sbSB8aClose, prSp}: {sbSB8aSp, sbDontBreak, 90},
|
||||
{sbATerm, prSep}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbATerm, prCR}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbATerm, prLF}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB7, prSep}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB7, prCR}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB7, prLF}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8Close, prSep}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8Close, prCR}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8Close, prLF}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSTerm, prSep}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSTerm, prCR}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSTerm, prLF}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8aClose, prSep}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8aClose, prCR}: {sbParaSep, sbDontBreak, 90},
|
||||
{sbSB8aClose, prLF}: {sbParaSep, sbDontBreak, 90},
|
||||
|
||||
// SB10.
|
||||
{sbSB8Sp, prSp}: {sbSB8Sp, sbDontBreak, 100},
|
||||
{sbSB8aSp, prSp}: {sbSB8aSp, sbDontBreak, 100},
|
||||
{sbSB8Sp, prSep}: {sbParaSep, sbDontBreak, 100},
|
||||
{sbSB8Sp, prCR}: {sbParaSep, sbDontBreak, 100},
|
||||
{sbSB8Sp, prLF}: {sbParaSep, sbDontBreak, 100},
|
||||
|
||||
// SB11.
|
||||
{sbATerm, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSB7, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSB8Close, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSB8Sp, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSTerm, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSB8aClose, prAny}: {sbAny, sbBreak, 110},
|
||||
{sbSB8aSp, prAny}: {sbAny, sbBreak, 110},
|
||||
// We'll always break after ParaSep due to SB4.
|
||||
}
|
||||
|
||||
// transitionSentenceBreakState determines the new state of the sentence break
|
||||
// parser given the current state and the next code point. It also returns
|
||||
// whether a sentence boundary was detected. If more than one code point is
|
||||
// needed to determine the new state, the byte slice or the string starting
|
||||
// after rune "r" can be used (whichever is not nil or empty) for further
|
||||
// lookups.
|
||||
func transitionSentenceBreakState(state int, r rune, b []byte, str string) (newState int, sentenceBreak bool) {
|
||||
// Determine the property of the next character.
|
||||
nextProperty := property(sentenceBreakCodePoints, r)
|
||||
|
||||
// SB5 (Replacing Ignore Rules).
|
||||
if nextProperty == prExtend || nextProperty == prFormat {
|
||||
if state == sbParaSep || state == sbCR {
|
||||
return sbAny, true // Make sure we don't apply SB5 to SB3 or SB4.
|
||||
}
|
||||
if state < 0 {
|
||||
return sbAny, true // SB1.
|
||||
}
|
||||
return state, false
|
||||
}
|
||||
|
||||
// Find the applicable transition in the table.
|
||||
var rule int
|
||||
transition, ok := sbTransitions[[2]int{state, nextProperty}]
|
||||
if ok {
|
||||
// We have a specific transition. We'll use it.
|
||||
newState, sentenceBreak, rule = transition[0], transition[1] == sbBreak, transition[2]
|
||||
} else {
|
||||
// No specific transition found. Try the less specific ones.
|
||||
transAnyProp, okAnyProp := sbTransitions[[2]int{state, prAny}]
|
||||
transAnyState, okAnyState := sbTransitions[[2]int{sbAny, nextProperty}]
|
||||
if okAnyProp && okAnyState {
|
||||
// Both apply. We'll use a mix (see comments for grTransitions).
|
||||
newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2]
|
||||
if transAnyProp[2] < transAnyState[2] {
|
||||
sentenceBreak, rule = transAnyProp[1] == sbBreak, transAnyProp[2]
|
||||
}
|
||||
} else if okAnyProp {
|
||||
// We only have a specific state.
|
||||
newState, sentenceBreak, rule = transAnyProp[0], transAnyProp[1] == sbBreak, transAnyProp[2]
|
||||
// This branch will probably never be reached because okAnyState will
|
||||
// always be true given the current transition map. But we keep it here
|
||||
// for future modifications to the transition map where this may not be
|
||||
// true anymore.
|
||||
} else if okAnyState {
|
||||
// We only have a specific property.
|
||||
newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2]
|
||||
} else {
|
||||
// No known transition. SB999: Any × Any.
|
||||
newState, sentenceBreak, rule = sbAny, false, 9990
|
||||
}
|
||||
}
|
||||
|
||||
// SB8.
|
||||
if rule > 80 && (state == sbATerm || state == sbSB8Close || state == sbSB8Sp || state == sbSB7) {
|
||||
// Check the right side of the rule.
|
||||
var length int
|
||||
for nextProperty != prOLetter &&
|
||||
nextProperty != prUpper &&
|
||||
nextProperty != prLower &&
|
||||
nextProperty != prSep &&
|
||||
nextProperty != prCR &&
|
||||
nextProperty != prLF &&
|
||||
nextProperty != prATerm &&
|
||||
nextProperty != prSTerm {
|
||||
// Move on to the next rune.
|
||||
if b != nil { // Byte slice version.
|
||||
r, length = utf8.DecodeRune(b)
|
||||
b = b[length:]
|
||||
} else { // String version.
|
||||
r, length = utf8.DecodeRuneInString(str)
|
||||
str = str[length:]
|
||||
}
|
||||
if r == utf8.RuneError {
|
||||
break
|
||||
}
|
||||
nextProperty = property(sentenceBreakCodePoints, r)
|
||||
}
|
||||
if nextProperty == prLower {
|
||||
return sbLower, false
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
244
vendor/github.com/rivo/uniseg/step.go
generated
vendored
Normal file
244
vendor/github.com/rivo/uniseg/step.go
generated
vendored
Normal file
@ -0,0 +1,244 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// The bit masks used to extract boundary information returned by [Step].
|
||||
const (
|
||||
MaskLine = 3
|
||||
MaskWord = 4
|
||||
MaskSentence = 8
|
||||
)
|
||||
|
||||
// The number of bits to shift the boundary information returned by [Step] to
|
||||
// obtain the monospace width of the grapheme cluster.
|
||||
const ShiftWidth = 4
|
||||
|
||||
// The bit positions by which boundary flags are shifted by the [Step] function.
|
||||
// These must correspond to the Mask constants.
|
||||
const (
|
||||
shiftWord = 2
|
||||
shiftSentence = 3
|
||||
// shiftWidth is ShiftWidth above. No mask as these are always the remaining bits.
|
||||
)
|
||||
|
||||
// The bit positions by which states are shifted by the [Step] function. These
|
||||
// values must ensure state values defined for each of the boundary algorithms
|
||||
// don't overlap (and that they all still fit in a single int). These must
|
||||
// correspond to the Mask constants.
|
||||
const (
|
||||
shiftWordState = 4
|
||||
shiftSentenceState = 9
|
||||
shiftLineState = 13
|
||||
shiftPropState = 21 // No mask as these are always the remaining bits.
|
||||
)
|
||||
|
||||
// The bit mask used to extract the state returned by the [Step] function, after
|
||||
// shifting. These values must correspond to the shift constants.
|
||||
const (
|
||||
maskGraphemeState = 0xf
|
||||
maskWordState = 0x1f
|
||||
maskSentenceState = 0xf
|
||||
maskLineState = 0xff
|
||||
)
|
||||
|
||||
// Step returns the first grapheme cluster (user-perceived character) found in
// the given byte slice. It also returns information about the boundary between
// that grapheme cluster and the one following it as well as the monospace width
// of the grapheme cluster. There are three types of boundary information: word
// boundaries, sentence boundaries, and line breaks. This function is therefore
// a combination of [FirstGraphemeCluster], [FirstWord], [FirstSentence], and
// [FirstLineSegment].
//
// The "boundaries" return value can be evaluated as follows:
//
//   - boundaries&MaskWord != 0: The boundary is a word boundary.
//   - boundaries&MaskWord == 0: The boundary is not a word boundary.
//   - boundaries&MaskSentence != 0: The boundary is a sentence boundary.
//   - boundaries&MaskSentence == 0: The boundary is not a sentence boundary.
//   - boundaries&MaskLine == LineDontBreak: You must not break the line at the
//     boundary.
//   - boundaries&MaskLine == LineMustBreak: You must break the line at the
//     boundary.
//   - boundaries&MaskLine == LineCanBreak: You may or may not break the line at
//     the boundary.
//   - boundaries >> ShiftWidth: The width of the grapheme cluster for most
//     monospace fonts where a value of 1 represents one character cell.
//
// This function can be called continuously to extract all grapheme clusters
// from a byte slice, as illustrated in the examples below.
//
// If you don't know which state to pass, for example when calling the function
// for the first time, you must pass -1. For consecutive calls, pass the state
// and rest slice returned by the previous call.
//
// The "rest" slice is the sub-slice of the original byte slice "b" starting
// after the last byte of the identified grapheme cluster. If the length of the
// "rest" slice is 0, the entire byte slice "b" has been processed. The
// "cluster" byte slice is the sub-slice of the input slice containing the
// first identified grapheme cluster.
//
// Given an empty byte slice "b", the function returns nil values.
//
// While slightly less convenient than using the Graphemes class, this function
// has much better performance and makes no allocations. It lends itself well to
// large byte slices.
//
// Note that in accordance with UAX #14 LB3, the final segment will end with
// a mandatory line break (boundaries&MaskLine == LineMustBreak). You can choose
// to ignore this by checking if the length of the "rest" slice is 0 and calling
// [HasTrailingLineBreak] or [HasTrailingLineBreakInString] on the last rune.
func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState int) {
|
||||
// An empty byte slice returns nothing.
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRune(b)
|
||||
if len(b) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
var prop int
|
||||
if state < 0 {
|
||||
prop = property(graphemeCodePoints, r)
|
||||
} else {
|
||||
prop = state >> shiftPropState
|
||||
}
|
||||
return b, nil, LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (runeWidth(r, prop) << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) | (prop << shiftPropState)
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
var graphemeState, wordState, sentenceState, lineState, firstProp int
|
||||
remainder := b[length:]
|
||||
if state < 0 {
|
||||
graphemeState, firstProp, _ = transitionGraphemeState(state, r)
|
||||
wordState, _ = transitionWordBreakState(state, r, remainder, "")
|
||||
sentenceState, _ = transitionSentenceBreakState(state, r, remainder, "")
|
||||
lineState, _ = transitionLineBreakState(state, r, remainder, "")
|
||||
} else {
|
||||
graphemeState = state & maskGraphemeState
|
||||
wordState = (state >> shiftWordState) & maskWordState
|
||||
sentenceState = (state >> shiftSentenceState) & maskSentenceState
|
||||
lineState = (state >> shiftLineState) & maskLineState
|
||||
firstProp = state >> shiftPropState
|
||||
}
|
||||
|
||||
// Transition until we find a grapheme cluster boundary.
|
||||
width := runeWidth(r, firstProp)
|
||||
for {
|
||||
var (
|
||||
graphemeBoundary, wordBoundary, sentenceBoundary bool
|
||||
lineBreak, prop int
|
||||
)
|
||||
|
||||
r, l := utf8.DecodeRune(remainder)
|
||||
remainder = b[length+l:]
|
||||
|
||||
graphemeState, prop, graphemeBoundary = transitionGraphemeState(graphemeState, r)
|
||||
wordState, wordBoundary = transitionWordBreakState(wordState, r, remainder, "")
|
||||
sentenceState, sentenceBoundary = transitionSentenceBreakState(sentenceState, r, remainder, "")
|
||||
lineState, lineBreak = transitionLineBreakState(lineState, r, remainder, "")
|
||||
|
||||
if graphemeBoundary {
|
||||
boundary := lineBreak | (width << ShiftWidth)
|
||||
if wordBoundary {
|
||||
boundary |= 1 << shiftWord
|
||||
}
|
||||
if sentenceBoundary {
|
||||
boundary |= 1 << shiftSentence
|
||||
}
|
||||
return b[:length], b[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState)
|
||||
}
|
||||
|
||||
if r == vs16 {
|
||||
width = 2
|
||||
} else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
|
||||
width += runeWidth(r, prop)
|
||||
} else if firstProp == prExtendedPictographic {
|
||||
if r == vs15 {
|
||||
width = 1
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(b) <= length {
|
||||
return b, nil, LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (width << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) | (prop << shiftPropState)
|
||||
}
|
||||
}
|
||||
}
|
||||
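// Sketch of decoding the packed "boundaries" value documented above, using the
// exported masks and shift. Assumes a standalone program importing the vendored
// package; the sample text is made up.
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("Hi 🌍! How are you?")
	state := -1
	for len(b) > 0 {
		var cluster []byte
		var boundaries int
		cluster, b, boundaries, state = uniseg.Step(b, state)
		fmt.Printf("%q word=%v sentence=%v line=%d width=%d\n",
			cluster,
			boundaries&uniseg.MaskWord != 0,
			boundaries&uniseg.MaskSentence != 0,
			boundaries&uniseg.MaskLine,
			boundaries>>uniseg.ShiftWidth)
	}
}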
|
||||
// StepString is like [Step] but its input and outputs are strings.
|
||||
func StepString(str string, state int) (cluster, rest string, boundaries int, newState int) {
|
||||
// An empty string returns nothing.
|
||||
if len(str) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRuneInString(str)
|
||||
if len(str) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
prop := property(graphemeCodePoints, r)
|
||||
return str, "", LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (runeWidth(r, prop) << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState)
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
var graphemeState, wordState, sentenceState, lineState, firstProp int
|
||||
remainder := str[length:]
|
||||
if state < 0 {
|
||||
graphemeState, firstProp, _ = transitionGraphemeState(state, r)
|
||||
wordState, _ = transitionWordBreakState(state, r, nil, remainder)
|
||||
sentenceState, _ = transitionSentenceBreakState(state, r, nil, remainder)
|
||||
lineState, _ = transitionLineBreakState(state, r, nil, remainder)
|
||||
} else {
|
||||
graphemeState = state & maskGraphemeState
|
||||
wordState = (state >> shiftWordState) & maskWordState
|
||||
sentenceState = (state >> shiftSentenceState) & maskSentenceState
|
||||
lineState = (state >> shiftLineState) & maskLineState
|
||||
firstProp = state >> shiftPropState
|
||||
}
|
||||
|
||||
// Transition until we find a grapheme cluster boundary.
|
||||
width := runeWidth(r, firstProp)
|
||||
for {
|
||||
var (
|
||||
graphemeBoundary, wordBoundary, sentenceBoundary bool
|
||||
lineBreak, prop int
|
||||
)
|
||||
|
||||
r, l := utf8.DecodeRuneInString(remainder)
|
||||
remainder = str[length+l:]
|
||||
|
||||
graphemeState, prop, graphemeBoundary = transitionGraphemeState(graphemeState, r)
|
||||
wordState, wordBoundary = transitionWordBreakState(wordState, r, nil, remainder)
|
||||
sentenceState, sentenceBoundary = transitionSentenceBreakState(sentenceState, r, nil, remainder)
|
||||
lineState, lineBreak = transitionLineBreakState(lineState, r, nil, remainder)
|
||||
|
||||
if graphemeBoundary {
|
||||
boundary := lineBreak | (width << ShiftWidth)
|
||||
if wordBoundary {
|
||||
boundary |= 1 << shiftWord
|
||||
}
|
||||
if sentenceBoundary {
|
||||
boundary |= 1 << shiftSentence
|
||||
}
|
||||
return str[:length], str[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState)
|
||||
}
|
||||
|
||||
if r == vs16 {
|
||||
width = 2
|
||||
} else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL {
|
||||
width += runeWidth(r, prop)
|
||||
} else if firstProp == prExtendedPictographic {
|
||||
if r == vs15 {
|
||||
width = 1
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(str) <= length {
|
||||
return str, "", LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (width << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) | (prop << shiftPropState)
|
||||
}
|
||||
}
|
||||
}
|
54
vendor/github.com/rivo/uniseg/width.go
generated
vendored
Normal file
54
vendor/github.com/rivo/uniseg/width.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package uniseg
|
||||
|
||||
// runeWidth returns the monospace width for the given rune. The provided
// grapheme property is a value mapped by the [graphemeCodePoints] table.
//
// Every rune has a width of 1, except for runes with the following properties
// (evaluated in this order):
//
//   - Control, CR, LF, Extend, ZWJ: Width of 0
//   - \u2e3a, TWO-EM DASH: Width of 3
//   - \u2e3b, THREE-EM DASH: Width of 4
//   - East-Asian width Fullwidth and Wide: Width of 2 (Ambiguous and Neutral
//     have a width of 1)
//   - Regional Indicator: Width of 2
//   - Extended Pictographic: Width of 2, unless Emoji Presentation is "No".
func runeWidth(r rune, graphemeProperty int) int {
|
||||
switch graphemeProperty {
|
||||
case prControl, prCR, prLF, prExtend, prZWJ:
|
||||
return 0
|
||||
case prRegionalIndicator:
|
||||
return 2
|
||||
case prExtendedPictographic:
|
||||
if property(emojiPresentation, r) == prEmojiPresentation {
|
||||
return 2
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
switch r {
|
||||
case 0x2e3a:
|
||||
return 3
|
||||
case 0x2e3b:
|
||||
return 4
|
||||
}
|
||||
|
||||
switch property(eastAsianWidth, r) {
|
||||
case prW, prF:
|
||||
return 2
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
// StringWidth returns the monospace width for the given string, that is, the
// number of same-size cells to be occupied by the string.
func StringWidth(s string) (width int) {
|
||||
state := -1
|
||||
for len(s) > 0 {
|
||||
var w int
|
||||
_, s, w, state = FirstGraphemeClusterInString(s, state)
|
||||
width += w
|
||||
}
|
||||
return
|
||||
}
|
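// Usage sketch for StringWidth, assuming a standalone program importing the
// vendored package; the sample strings are made up. Per the runeWidth rules
// above, wide and emoji runes count as two cells and combining marks as zero.
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	for _, s := range []string{"hello", "世界", "🇩🇪🏳️‍🌈"} {
		fmt.Println(s, uniseg.StringWidth(s))
	}
}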
87
vendor/github.com/rivo/uniseg/word.go
generated
vendored
Normal file
87
vendor/github.com/rivo/uniseg/word.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// FirstWord returns the first word found in the given byte slice according to
// the rules of Unicode Standard Annex #29, Word Boundaries. This function can
// be called continuously to extract all words from a byte slice, as illustrated
// in the example below.
//
// If you don't know the current state, for example when calling the function
// for the first time, you must pass -1. For consecutive calls, pass the state
// and rest slice returned by the previous call.
//
// The "rest" slice is the sub-slice of the original byte slice "b" starting
// after the last byte of the identified word. If the length of the "rest" slice
// is 0, the entire byte slice "b" has been processed. The "word" byte slice is
// the sub-slice of the input slice containing the identified word.
//
// Given an empty byte slice "b", the function returns nil values.
func FirstWord(b []byte, state int) (word, rest []byte, newState int) {
|
||||
// An empty byte slice returns nothing.
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRune(b)
|
||||
if len(b) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return b, nil, wbAny
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionWordBreakState(state, r, b[length:], "")
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary bool
|
||||
for {
|
||||
r, l := utf8.DecodeRune(b[length:])
|
||||
state, boundary = transitionWordBreakState(state, r, b[length+l:], "")
|
||||
|
||||
if boundary {
|
||||
return b[:length], b[length:], state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(b) <= length {
|
||||
return b, nil, wbAny
|
||||
}
|
||||
}
|
||||
}
|
||||
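// Minimal sketch of splitting a byte slice into words with FirstWord, following
// the doc comment above (initial state -1, then feed back state and rest).
// Assumes a standalone program importing the vendored package; the text is made up.
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("Hello, world!")
	state := -1
	for len(b) > 0 {
		var word []byte
		// Note that spaces and punctuation come back as their own "words" per UAX #29.
		word, b, state = uniseg.FirstWord(b, state)
		fmt.Printf("%q\n", word)
	}
}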
|
||||
// FirstWordInString is like [FirstWord] but its input and outputs are strings.
|
||||
func FirstWordInString(str string, state int) (word, rest string, newState int) {
|
||||
// An empty string returns nothing.
|
||||
if len(str) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the first rune.
|
||||
r, length := utf8.DecodeRuneInString(str)
|
||||
if len(str) <= length { // If we're already past the end, there is nothing else to parse.
|
||||
return str, "", wbAny
|
||||
}
|
||||
|
||||
// If we don't know the state, determine it now.
|
||||
if state < 0 {
|
||||
state, _ = transitionWordBreakState(state, r, nil, str[length:])
|
||||
}
|
||||
|
||||
// Transition until we find a boundary.
|
||||
var boundary bool
|
||||
for {
|
||||
r, l := utf8.DecodeRuneInString(str[length:])
|
||||
state, boundary = transitionWordBreakState(state, r, nil, str[length+l:])
|
||||
|
||||
if boundary {
|
||||
return str[:length], str[length:], state
|
||||
}
|
||||
|
||||
length += l
|
||||
if len(str) <= length {
|
||||
return str, "", wbAny
|
||||
}
|
||||
}
|
||||
}
|
1848
vendor/github.com/rivo/uniseg/wordproperties.go
generated
vendored
Normal file
1848
vendor/github.com/rivo/uniseg/wordproperties.go
generated
vendored
Normal file
File diff suppressed because it is too large
246
vendor/github.com/rivo/uniseg/wordrules.go
generated
vendored
Normal file
246
vendor/github.com/rivo/uniseg/wordrules.go
generated
vendored
Normal file
@ -0,0 +1,246 @@
|
||||
package uniseg
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// The states of the word break parser.
|
||||
const (
|
||||
wbAny = iota
|
||||
wbCR
|
||||
wbLF
|
||||
wbNewline
|
||||
wbWSegSpace
|
||||
wbHebrewLetter
|
||||
wbALetter
|
||||
wbWB7
|
||||
wbWB7c
|
||||
wbNumeric
|
||||
wbWB11
|
||||
wbKatakana
|
||||
wbExtendNumLet
|
||||
wbOddRI
|
||||
wbEvenRI
|
||||
wbZWJBit = 16 // This bit is set for any states followed by at least one zero-width joiner (see WB4 and WB3c).
|
||||
)
|
||||
|
||||
// The word break parser's breaking instructions.
|
||||
const (
|
||||
wbDontBreak = iota
|
||||
wbBreak
|
||||
)
|
||||
|
||||
// The word break parser's state transitions. It's analogous to grTransitions,
// see comments there for details. Unicode version 14.0.0.
var wbTransitions = map[[2]int][3]int{
|
||||
// WB3b.
|
||||
{wbAny, prNewline}: {wbNewline, wbBreak, 32},
|
||||
{wbAny, prCR}: {wbCR, wbBreak, 32},
|
||||
{wbAny, prLF}: {wbLF, wbBreak, 32},
|
||||
|
||||
// WB3a.
|
||||
{wbNewline, prAny}: {wbAny, wbBreak, 31},
|
||||
{wbCR, prAny}: {wbAny, wbBreak, 31},
|
||||
{wbLF, prAny}: {wbAny, wbBreak, 31},
|
||||
|
||||
// WB3.
|
||||
{wbCR, prLF}: {wbLF, wbDontBreak, 30},
|
||||
|
||||
// WB3d.
|
||||
{wbAny, prWSegSpace}: {wbWSegSpace, wbBreak, 9990},
|
||||
{wbWSegSpace, prWSegSpace}: {wbWSegSpace, wbDontBreak, 34},
|
||||
|
||||
// WB5.
|
||||
{wbAny, prALetter}: {wbALetter, wbBreak, 9990},
|
||||
{wbAny, prHebrewLetter}: {wbHebrewLetter, wbBreak, 9990},
|
||||
{wbALetter, prALetter}: {wbALetter, wbDontBreak, 50},
|
||||
{wbALetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50},
|
||||
{wbHebrewLetter, prALetter}: {wbALetter, wbDontBreak, 50},
|
||||
{wbHebrewLetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50},
|
||||
|
||||
// WB7. Transitions to wbWB7 handled by transitionWordBreakState().
|
||||
{wbWB7, prALetter}: {wbALetter, wbDontBreak, 70},
|
||||
{wbWB7, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 70},
|
||||
|
||||
// WB7a.
|
||||
{wbHebrewLetter, prSingleQuote}: {wbAny, wbDontBreak, 71},
|
||||
|
||||
// WB7c. Transitions to wbWB7c handled by transitionWordBreakState().
|
||||
{wbWB7c, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 73},
|
||||
|
||||
// WB8.
|
||||
{wbAny, prNumeric}: {wbNumeric, wbBreak, 9990},
|
||||
{wbNumeric, prNumeric}: {wbNumeric, wbDontBreak, 80},
|
||||
|
||||
// WB9.
|
||||
{wbALetter, prNumeric}: {wbNumeric, wbDontBreak, 90},
|
||||
{wbHebrewLetter, prNumeric}: {wbNumeric, wbDontBreak, 90},
|
||||
|
||||
// WB10.
|
||||
{wbNumeric, prALetter}: {wbALetter, wbDontBreak, 100},
|
||||
{wbNumeric, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 100},
|
||||
|
||||
// WB11. Transitions to wbWB11 handled by transitionWordBreakState().
|
||||
{wbWB11, prNumeric}: {wbNumeric, wbDontBreak, 110},
|
||||
|
||||
// WB13.
|
||||
{wbAny, prKatakana}: {wbKatakana, wbBreak, 9990},
|
||||
{wbKatakana, prKatakana}: {wbKatakana, wbDontBreak, 130},
|
||||
|
||||
// WB13a.
|
||||
{wbAny, prExtendNumLet}: {wbExtendNumLet, wbBreak, 9990},
|
||||
{wbALetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131},
|
||||
{wbHebrewLetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131},
|
||||
{wbNumeric, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131},
|
||||
{wbKatakana, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131},
|
||||
{wbExtendNumLet, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131},
|
||||
|
||||
// WB13b.
|
||||
{wbExtendNumLet, prALetter}: {wbALetter, wbDontBreak, 132},
|
||||
{wbExtendNumLet, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 132},
|
||||
{wbExtendNumLet, prNumeric}: {wbNumeric, wbDontBreak, 132},
|
||||
{wbExtendNumLet, prKatakana}: {prKatakana, wbDontBreak, 132},
|
||||
}
|
||||
|
||||
// transitionWordBreakState determines the new state of the word break parser
|
||||
// given the current state and the next code point. It also returns whether a
|
||||
// word boundary was detected. If more than one code point is needed to
|
||||
// determine the new state, the byte slice or the string starting after rune "r"
|
||||
// can be used (whichever is not nil or empty) for further lookups.
|
||||
func transitionWordBreakState(state int, r rune, b []byte, str string) (newState int, wordBreak bool) {
|
||||
// Determine the property of the next character.
|
||||
nextProperty := property(workBreakCodePoints, r)
|
||||
|
||||
// "Replacing Ignore Rules".
|
||||
if nextProperty == prZWJ {
|
||||
// WB4 (for zero-width joiners).
|
||||
if state == wbNewline || state == wbCR || state == wbLF {
|
||||
return wbAny | wbZWJBit, true // Make sure we don't apply WB4 to WB3a.
|
||||
}
|
||||
if state < 0 {
|
||||
return wbAny | wbZWJBit, false
|
||||
}
|
||||
return state | wbZWJBit, false
|
||||
} else if nextProperty == prExtend || nextProperty == prFormat {
|
||||
// WB4 (for Extend and Format).
|
||||
if state == wbNewline || state == wbCR || state == wbLF {
|
||||
return wbAny, true // Make sure we don't apply WB4 to WB3a.
|
||||
}
|
||||
if state == wbWSegSpace || state == wbAny|wbZWJBit {
|
||||
return wbAny, false // We don't break but this is also not WB3d or WB3c.
|
||||
}
|
||||
if state < 0 {
|
||||
return wbAny, false
|
||||
}
|
||||
return state, false
|
||||
} else if nextProperty == prExtendedPictographic && state >= 0 && state&wbZWJBit != 0 {
|
||||
// WB3c.
|
||||
return wbAny, false
|
||||
}
|
||||
if state >= 0 {
|
||||
state = state &^ wbZWJBit
|
||||
}
|
||||
|
||||
// Find the applicable transition in the table.
|
||||
var rule int
|
||||
transition, ok := wbTransitions[[2]int{state, nextProperty}]
|
||||
if ok {
|
||||
// We have a specific transition. We'll use it.
|
||||
newState, wordBreak, rule = transition[0], transition[1] == wbBreak, transition[2]
|
||||
} else {
|
||||
// No specific transition found. Try the less specific ones.
|
||||
transAnyProp, okAnyProp := wbTransitions[[2]int{state, prAny}]
|
||||
transAnyState, okAnyState := wbTransitions[[2]int{wbAny, nextProperty}]
|
||||
if okAnyProp && okAnyState {
|
||||
// Both apply. We'll use a mix (see comments for grTransitions).
|
||||
newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2]
|
||||
if transAnyProp[2] < transAnyState[2] {
|
||||
wordBreak, rule = transAnyProp[1] == wbBreak, transAnyProp[2]
|
||||
}
|
||||
} else if okAnyProp {
|
||||
// We only have a specific state.
|
||||
newState, wordBreak, rule = transAnyProp[0], transAnyProp[1] == wbBreak, transAnyProp[2]
|
||||
// This branch will probably never be reached because okAnyState will
|
||||
// always be true given the current transition map. But we keep it here
|
||||
// for future modifications to the transition map where this may not be
|
||||
// true anymore.
|
||||
} else if okAnyState {
|
||||
// We only have a specific property.
|
||||
newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2]
|
||||
} else {
|
||||
// No known transition. WB999: Any ÷ Any.
|
||||
newState, wordBreak, rule = wbAny, true, 9990
|
||||
}
|
||||
}
|
||||
|
||||
// For those rules that need to look up runes further in the string, we
|
||||
// determine the property after nextProperty, skipping over Format, Extend,
|
||||
// and ZWJ (according to WB4). It's -1 if not needed, if such a rune cannot
|
||||
// be determined (because the text ends or the rune is faulty).
|
||||
farProperty := -1
|
||||
if rule > 60 &&
|
||||
(state == wbALetter || state == wbHebrewLetter || state == wbNumeric) &&
|
||||
(nextProperty == prMidLetter || nextProperty == prMidNumLet || nextProperty == prSingleQuote || // WB6.
|
||||
nextProperty == prDoubleQuote || // WB7b.
|
||||
nextProperty == prMidNum) { // WB12.
|
||||
for {
|
||||
var (
|
||||
r rune
|
||||
length int
|
||||
)
|
||||
if b != nil { // Byte slice version.
|
||||
r, length = utf8.DecodeRune(b)
|
||||
b = b[length:]
|
||||
} else { // String version.
|
||||
r, length = utf8.DecodeRuneInString(str)
|
||||
str = str[length:]
|
||||
}
|
||||
if r == utf8.RuneError {
|
||||
break
|
||||
}
|
||||
prop := property(workBreakCodePoints, r)
|
||||
if prop == prExtend || prop == prFormat || prop == prZWJ {
|
||||
continue
|
||||
}
|
||||
farProperty = prop
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// WB6.
|
||||
if rule > 60 &&
|
||||
(state == wbALetter || state == wbHebrewLetter) &&
|
||||
(nextProperty == prMidLetter || nextProperty == prMidNumLet || nextProperty == prSingleQuote) &&
|
||||
(farProperty == prALetter || farProperty == prHebrewLetter) {
|
||||
return wbWB7, false
|
||||
}
|
||||
|
||||
// WB7b.
|
||||
if rule > 72 &&
|
||||
state == wbHebrewLetter &&
|
||||
nextProperty == prDoubleQuote &&
|
||||
farProperty == prHebrewLetter {
|
||||
return wbWB7c, false
|
||||
}
|
||||
|
||||
// WB12.
|
||||
if rule > 120 &&
|
||||
state == wbNumeric &&
|
||||
(nextProperty == prMidNum || nextProperty == prMidNumLet || nextProperty == prSingleQuote) &&
|
||||
farProperty == prNumeric {
|
||||
return wbWB11, false
|
||||
}
|
||||
|
||||
// WB15 and WB16.
|
||||
if newState == wbAny && nextProperty == prRegionalIndicator {
|
||||
if state != wbOddRI && state != wbEvenRI { // Includes state == -1.
|
||||
// Transition into the first RI.
|
||||
return wbOddRI, true
|
||||
}
|
||||
if state == wbOddRI {
|
||||
// Don't break pairs of Regional Indicators.
|
||||
return wbEvenRI, false
|
||||
}
|
||||
return wbOddRI, true // We can break after a pair.
|
||||
}
|
||||
|
||||
return
|
||||
}
|
17 vendor/github.com/vishvananda/netlink/addr_linux.go generated vendored
@ -176,7 +176,7 @@ func AddrList(link Link, family int) ([]Addr, error) {
// The list can be filtered by link and ip family.
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
    req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP)
    msg := nl.NewIfInfomsg(family)
    msg := nl.NewIfAddrmsg(family)
    req.AddData(msg)

    msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)
@ -296,13 +296,13 @@ type AddrUpdate struct {
// AddrSubscribe takes a chan down which notifications will be sent
// when addresses change. Close the 'done' chan to stop subscription.
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
    return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0)
    return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil)
}

// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
    return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0)
    return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil)
}

// AddrSubscribeOptions contains a set of options to use with
@ -312,6 +312,7 @@ type AddrSubscribeOptions struct {
    ErrorCallback func(error)
    ListExisting bool
    ReceiveBufferSize int
    ReceiveTimeout *unix.Timeval
}

// AddrSubscribeWithOptions work like AddrSubscribe but enable to
@ -322,14 +323,20 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option
        none := netns.None()
        options.Namespace = &none
    }
    return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize)
    return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize, options.ReceiveTimeout)
}

func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error {
func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int, rcvTimeout *unix.Timeval) error {
    s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)
    if err != nil {
        return err
    }
    if rcvTimeout != nil {
        if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
            return err
        }
    }

    if done != nil {
        go func() {
            <-done
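The new rcvTimeout plumbing is surfaced through AddrSubscribeOptions.ReceiveTimeout. A minimal sketch of a caller using it, assuming the field and function signatures shown in the hunk above (not taken from this commit):

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    updates := make(chan netlink.AddrUpdate)
    done := make(chan struct{})
    defer close(done)

    // Bound each receive on the subscription socket; without a timeout a
    // quiet socket can block indefinitely.
    timeout := unix.Timeval{Sec: 5}
    opts := netlink.AddrSubscribeOptions{
        ListExisting:   true,
        ReceiveTimeout: &timeout,
    }
    if err := netlink.AddrSubscribeWithOptions(updates, done, opts); err != nil {
        panic(err)
    }

    for u := range updates {
        fmt.Printf("addr %v new=%v\n", u.LinkAddress, u.NewAddr)
    }
}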
33 vendor/github.com/vishvananda/netlink/filter.go generated vendored
@ -157,6 +157,39 @@ func NewConnmarkAction() *ConnmarkAction {
    }
}

type CsumUpdateFlags uint32

const (
    TCA_CSUM_UPDATE_FLAG_IPV4HDR CsumUpdateFlags = 1
    TCA_CSUM_UPDATE_FLAG_ICMP CsumUpdateFlags = 2
    TCA_CSUM_UPDATE_FLAG_IGMP CsumUpdateFlags = 4
    TCA_CSUM_UPDATE_FLAG_TCP CsumUpdateFlags = 8
    TCA_CSUM_UPDATE_FLAG_UDP CsumUpdateFlags = 16
    TCA_CSUM_UPDATE_FLAG_UDPLITE CsumUpdateFlags = 32
    TCA_CSUM_UPDATE_FLAG_SCTP CsumUpdateFlags = 64
)

type CsumAction struct {
    ActionAttrs
    UpdateFlags CsumUpdateFlags
}

func (action *CsumAction) Type() string {
    return "csum"
}

func (action *CsumAction) Attrs() *ActionAttrs {
    return &action.ActionAttrs
}

func NewCsumAction() *CsumAction {
    return &CsumAction{
        ActionAttrs: ActionAttrs{
            Action: TC_ACT_PIPE,
        },
    }
}

type MirredAct uint8

func (a MirredAct) String() string {
22 vendor/github.com/vishvananda/netlink/filter_linux.go generated vendored
@ -607,6 +607,16 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
            }
            toTcGen(action.Attrs(), &connmark.TcGen)
            aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize())
        case *CsumAction:
            table := attr.AddRtAttr(tabIndex, nil)
            tabIndex++
            table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("csum"))
            aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
            csum := nl.TcCsum{
                UpdateFlags: uint32(action.UpdateFlags),
            }
            toTcGen(action.Attrs(), &csum.TcGen)
            aopts.AddRtAttr(nl.TCA_CSUM_PARMS, csum.Serialize())
        case *BpfAction:
            table := attr.AddRtAttr(tabIndex, nil)
            tabIndex++
@ -675,6 +685,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
                action = &BpfAction{}
            case "connmark":
                action = &ConnmarkAction{}
            case "csum":
                action = &CsumAction{}
            case "gact":
                action = &GenericAction{}
            case "tunnel_key":
@ -755,6 +767,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
                        toAttrs(&connmark.TcGen, action.Attrs())
                        action.(*ConnmarkAction).Zone = connmark.Zone
                    }
                case "csum":
                    switch adatum.Attr.Type {
                    case nl.TCA_CSUM_PARMS:
                        csum := *nl.DeserializeTcCsum(adatum.Value)
                        action.(*CsumAction).ActionAttrs = ActionAttrs{}
                        toAttrs(&csum.TcGen, action.Attrs())
                        action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags)
                    }
                case "gact":
                    switch adatum.Attr.Type {
                    case nl.TCA_GACT_PARMS:
@ -859,7 +879,7 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
        case nl.TCA_BPF_ID:
            bpf.Id = int(native.Uint32(datum.Value[0:4]))
        case nl.TCA_BPF_TAG:
            bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1])
            bpf.Tag = hex.EncodeToString(datum.Value)
        }
    }
    return detailed, nil
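A short sketch of how the new csum action might be built on the caller side before it flows through the encode path above. Only the constructor and constants added in filter.go are used; attaching the action to a concrete filter (for example a netlink.U32 classifier) is assumed, not shown in this commit:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
)

func main() {
    // Recompute the IPv4 header plus TCP/UDP checksums for matched packets;
    // NewCsumAction defaults the generic action to TC_ACT_PIPE.
    csum := netlink.NewCsumAction()
    csum.UpdateFlags = netlink.TCA_CSUM_UPDATE_FLAG_IPV4HDR |
        netlink.TCA_CSUM_UPDATE_FLAG_TCP |
        netlink.TCA_CSUM_UPDATE_FLAG_UDP

    // The action would typically be appended to a classifier's Actions list
    // before adding the filter with netlink.FilterAdd.
    fmt.Printf("csum update flags: %#x\n", uint32(csum.UpdateFlags))
}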
15 vendor/github.com/vishvananda/netlink/handle_linux.go generated vendored
@ -107,6 +107,21 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) {
    return results, nil
}

// SetStrictCheck sets the strict check socket option for each socket in the netlink handle. Returns early if any set operation fails
func (h *Handle) SetStrictCheck(state bool) error {
    for _, sh := range h.sockets {
        var stateInt int = 0
        if state {
            stateInt = 1
        }
        err := unix.SetsockoptInt(sh.Socket.GetFd(), unix.SOL_NETLINK, unix.NETLINK_GET_STRICT_CHK, stateInt)
        if err != nil {
            return err
        }
    }
    return nil
}

// NewHandleAt returns a netlink handle on the network namespace
// specified by ns. If ns=netns.None(), current network namespace
// will be assumed
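A minimal sketch of using the new per-handle strict checking together with a filtered route dump. Everything other than SetStrictCheck is pre-existing netlink API and is assumed here, not introduced by this commit:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    h, err := netlink.NewHandle()
    if err != nil {
        panic(err)
    }
    defer h.Delete()

    // Ask the kernel to strictly validate dump requests on this handle's
    // sockets (NETLINK_GET_STRICT_CHK), so kernel-side filtering is honored.
    if err := h.SetStrictCheck(true); err != nil {
        panic(err)
    }

    routes, err := h.RouteListFiltered(unix.AF_INET,
        &netlink.Route{Table: unix.RT_TABLE_MAIN}, netlink.RT_FILTER_TABLE)
    if err != nil {
        panic(err)
    }
    fmt.Println("routes in main table:", len(routes))
}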
1 vendor/github.com/vishvananda/netlink/link.go generated vendored
@ -1038,6 +1038,7 @@ type Iptun struct {
    EncapType uint16
    EncapFlags uint16
    FlowBased bool
    Proto uint8
}

func (iptun *Iptun) Attrs() *LinkAttrs {
6 vendor/github.com/vishvananda/netlink/link_linux.go generated vendored
@ -2874,6 +2874,7 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
    data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags))
    data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport))
    data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport))
    data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(iptun.Proto))
}

func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
@ -2904,6 +2905,8 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
            iptun.EncapFlags = native.Uint16(datum.Value[0:2])
        case nl.IFLA_IPTUN_COLLECT_METADATA:
            iptun.FlowBased = true
        case nl.IFLA_IPTUN_PROTO:
            iptun.Proto = datum.Value[0]
        }
    }
}
@ -3226,8 +3229,9 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) {
    data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
    data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex)))
    if xfrmi.Ifid != 0 {
        data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid))

    }
}

func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) {
64 vendor/github.com/vishvananda/netlink/nl/nl_linux.go generated vendored
@ -39,6 +39,9 @@ var nextSeqNr uint32
// Default netlink socket timeout, 60s
var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0}

// ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets
var EnableErrorMessageReporting bool = false

// GetIPFamily returns the family type of a net.IP.
func GetIPFamily(ip net.IP) int {
    if len(ip) <= net.IPv4len {
@ -81,6 +84,14 @@ func Swap32(i uint32) uint32 {
    return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
}

const (
    NLMSGERR_ATTR_UNUSED = 0
    NLMSGERR_ATTR_MSG = 1
    NLMSGERR_ATTR_OFFS = 2
    NLMSGERR_ATTR_COOKIE = 3
    NLMSGERR_ATTR_POLICY = 4
)

type NetlinkRequestData interface {
    Len() int
    Serialize() []byte
@ -303,6 +314,12 @@ func (msg *IfInfomsg) EncapType() string {
    return fmt.Sprintf("unknown%d", msg.Type)
}

// Round the length of a netlink message up to align it properly.
// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license.
func nlmAlignOf(msglen int) int {
    return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1)
}

func rtaAlignOf(attrlen int) int {
    return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1)
}
@ -487,6 +504,11 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
        if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil {
            return nil, err
        }
        if EnableErrorMessageReporting {
            if err := s.SetExtAck(true); err != nil {
                return nil, err
            }
        }

        defer s.Close()
    } else {
@ -526,11 +548,37 @@ done:
        }
        if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR {
            native := NativeEndian()
            error := int32(native.Uint32(m.Data[0:4]))
            if error == 0 {
            errno := int32(native.Uint32(m.Data[0:4]))
            if errno == 0 {
                break done
            }
            return nil, syscall.Errno(-error)
            var err error
            err = syscall.Errno(-errno)

            unreadData := m.Data[4:]
            if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr {
                // Skip the echoed request message.
                echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0]))
                unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):]

                // Annotate `err` using nlmsgerr attributes.
                for len(unreadData) >= syscall.SizeofRtAttr {
                    attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0]))
                    attrData := unreadData[syscall.SizeofRtAttr:attr.Len]

                    switch attr.Type {
                    case NLMSGERR_ATTR_MSG:
                        err = fmt.Errorf("%w: %s", err, string(attrData))

                    default:
                        // TODO: handle other NLMSGERR_ATTR types
                    }

                    unreadData = unreadData[rtaAlignOf(int(attr.Len)):]
                }
            }

            return nil, err
        }
        if resType != 0 && m.Header.Type != resType {
            continue
@ -745,6 +793,16 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error {
    return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout)
}

// SetExtAck requests error messages to be reported on the socket
func (s *NetlinkSocket) SetExtAck(enable bool) error {
    var enableN int
    if enable {
        enableN = 1
    }

    return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN)
}

func (s *NetlinkSocket) GetPid() (uint32, error) {
    fd := int(atomic.LoadInt32(&s.fd))
    lsa, err := unix.Getsockname(fd)
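A sketch of opting into the extended-ack plumbing added above, so netlink errors can carry the kernel's NLMSGERR_ATTR_MSG text. Only the exported EnableErrorMessageReporting toggle from the hunk is new; the failing operation is illustrative and assumed:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
    "github.com/vishvananda/netlink/nl"
)

func main() {
    // Enable NETLINK_EXT_ACK on sockets created by the nl package, so the
    // error-parsing loop above can wrap the kernel's message into err.
    nl.EnableErrorMessageReporting = true

    // A request the kernel rejects should now return an error that includes
    // the extended-ack text, when the kernel provides one.
    link := &netlink.Dummy{LinkAttrs: netlink.LinkAttrs{Name: "dummy-ext-ack"}}
    if err := netlink.LinkAdd(link); err != nil {
        fmt.Println("LinkAdd:", err)
    }
}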
31 vendor/github.com/vishvananda/netlink/nl/tc_linux.go generated vendored
@ -90,6 +90,7 @@ const (
    SizeofTcU32Sel = 0x10 // without keys
    SizeofTcGen = 0x14
    SizeofTcConnmark = SizeofTcGen + 0x04
    SizeofTcCsum = SizeofTcGen + 0x04
    SizeofTcMirred = SizeofTcGen + 0x08
    SizeofTcTunnelKey = SizeofTcGen + 0x04
    SizeofTcSkbEdit = SizeofTcGen
@ -694,6 +695,36 @@ func (x *TcConnmark) Serialize() []byte {
    return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:]
}

const (
    TCA_CSUM_UNSPEC = iota
    TCA_CSUM_PARMS
    TCA_CSUM_TM
    TCA_CSUM_PAD
    TCA_CSUM_MAX = TCA_CSUM_PAD
)

// struct tc_csum {
// tc_gen;
// __u32 update_flags;
// }

type TcCsum struct {
    TcGen
    UpdateFlags uint32
}

func (msg *TcCsum) Len() int {
    return SizeofTcCsum
}

func DeserializeTcCsum(b []byte) *TcCsum {
    return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0]))
}

func (x *TcCsum) Serialize() []byte {
    return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:]
}

const (
    TCA_ACT_MIRRED = 8
)
18 vendor/github.com/vishvananda/netlink/route.go generated vendored
@ -11,6 +11,24 @@ type Scope uint8

type NextHopFlag int

const (
    RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota)
    RT_FILTER_SCOPE
    RT_FILTER_TYPE
    RT_FILTER_TOS
    RT_FILTER_IIF
    RT_FILTER_OIF
    RT_FILTER_DST
    RT_FILTER_SRC
    RT_FILTER_GW
    RT_FILTER_TABLE
    RT_FILTER_HOPLIMIT
    RT_FILTER_PRIORITY
    RT_FILTER_MARK
    RT_FILTER_MASK
    RT_FILTER_REALM
)

type Destination interface {
    Family() int
    Decode([]byte) error
22 vendor/github.com/vishvananda/netlink/route_linux.go generated vendored
@ -41,23 +41,6 @@ func (s Scope) String() string {
    }
}

const (
    RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota)
    RT_FILTER_SCOPE
    RT_FILTER_TYPE
    RT_FILTER_TOS
    RT_FILTER_IIF
    RT_FILTER_OIF
    RT_FILTER_DST
    RT_FILTER_SRC
    RT_FILTER_GW
    RT_FILTER_TABLE
    RT_FILTER_HOPLIMIT
    RT_FILTER_PRIORITY
    RT_FILTER_MARK
    RT_FILTER_MASK
    RT_FILTER_REALM
)

const (
    FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK
@ -1030,8 +1013,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e
// All rules must be defined in RouteFilter struct
func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
    req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP)
    infmsg := nl.NewIfInfomsg(family)
    req.AddData(infmsg)
    rtmsg := nl.NewRtMsg()
    rtmsg.Family = uint8(family)
    req.AddData(rtmsg)

    msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE)
    if err != nil {
1 vendor/github.com/vishvananda/netlink/rule.go generated vendored
@ -25,6 +25,7 @@ type Rule struct {
    Invert bool
    Dport *RulePortRange
    Sport *RulePortRange
    IPProto int
}

func (r Rule) String() string {
8 vendor/github.com/vishvananda/netlink/rule_linux.go generated vendored
@ -152,6 +152,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
        req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b))
    }

    if rule.IPProto > 0 {
        b := make([]byte, 4)
        native.PutUint32(b, uint32(rule.IPProto))
        req.AddData(nl.NewRtAttr(nl.FRA_IP_PROTO, b))
    }

    if rule.Dport != nil {
        b := rule.Dport.toRtAttrData()
        req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b))
@ -250,6 +256,8 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
            rule.Goto = int(native.Uint32(attrs[j].Value[0:4]))
        case nl.FRA_PRIORITY:
            rule.Priority = int(native.Uint32(attrs[j].Value[0:4]))
        case nl.FRA_IP_PROTO:
            rule.IPProto = int(native.Uint32(attrs[j].Value[0:4]))
        case nl.FRA_DPORT_RANGE:
            rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4]))
        case nl.FRA_SPORT_RANGE:
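A minimal sketch of the new Rule.IPProto field from the caller's side; NewRule and RuleAdd are pre-existing netlink API assumed here, and applying the rule requires CAP_NET_ADMIN:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    // Direct only TCP traffic through table 100; IPProto is encoded as the
    // FRA_IP_PROTO attribute by ruleHandle above.
    rule := netlink.NewRule()
    rule.IPProto = unix.IPPROTO_TCP
    rule.Table = 100

    if err := netlink.RuleAdd(rule); err != nil {
        fmt.Println("RuleAdd:", err)
    }
}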
4 vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go generated vendored
@ -93,8 +93,10 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
        req.AddData(out)
    }

    if policy.Ifid != 0 {
        ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid)))
        req.AddData(ifId)
    }

    _, err := req.Execute(unix.NETLINK_XFRM, 0)
    return err
@ -189,8 +191,10 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo
        req.AddData(out)
    }

    if policy.Ifid != 0 {
        ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid)))
        req.AddData(ifId)
    }

    resType := nl.XFRM_MSG_NEWPOLICY
    if nlProto == nl.XFRM_MSG_DELPOLICY {
4 vendor/github.com/vishvananda/netlink/xfrm_state_linux.go generated vendored
@ -167,8 +167,10 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
        }
    }

    if state.Ifid != 0 {
        ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid)))
        req.AddData(ifId)
    }

    _, err := req.Execute(unix.NETLINK_XFRM, 0)
    return err
@ -281,8 +283,10 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState
        req.AddData(out)
    }

    if state.Ifid != 0 {
        ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid)))
        req.AddData(ifId)
    }

    resType := nl.XFRM_MSG_NEWSA
    if nlProto == nl.XFRM_MSG_DELSA {
23 vendor/modules.txt vendored
@ -92,7 +92,7 @@ github.com/containernetworking/cni/pkg/types/create
github.com/containernetworking/cni/pkg/types/internal
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
# github.com/containernetworking/plugins v1.2.0
## explicit; go 1.17
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.28.1-0.20221221082547-8403b6ebc13d
@ -118,7 +118,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.50.2-0.20230113010242-57f81310d68a
# github.com/containers/common v0.50.2-0.20230117154327-37d31888e634
## explicit; go 1.17
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@ -172,7 +172,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.23.1-0.20230104183125-aaf0985b36f9
# github.com/containers/image/v5 v5.23.1-0.20230116122250-3d22f4e96c53
## explicit; go 1.17
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -199,10 +199,12 @@ github.com/containers/image/v5/internal/private
github.com/containers/image/v5/internal/putblobdigest
github.com/containers/image/v5/internal/rootless
github.com/containers/image/v5/internal/signature
github.com/containers/image/v5/internal/signer
github.com/containers/image/v5/internal/streamdigest
github.com/containers/image/v5/internal/tmpdir
github.com/containers/image/v5/internal/unparsedimage
github.com/containers/image/v5/internal/uploadreader
github.com/containers/image/v5/internal/useragent
github.com/containers/image/v5/manifest
github.com/containers/image/v5/oci/archive
github.com/containers/image/v5/oci/internal
@ -227,7 +229,10 @@ github.com/containers/image/v5/pkg/tlsclientconfig
github.com/containers/image/v5/sif
github.com/containers/image/v5/signature
github.com/containers/image/v5/signature/internal
github.com/containers/image/v5/signature/signer
github.com/containers/image/v5/signature/sigstore
github.com/containers/image/v5/signature/sigstore/internal
github.com/containers/image/v5/signature/simplesigning
github.com/containers/image/v5/storage
github.com/containers/image/v5/tarball
github.com/containers/image/v5/transports
@ -237,8 +242,8 @@ github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
## explicit
github.com/containers/libtrust
# github.com/containers/ocicrypt v1.1.6
## explicit; go 1.12
# github.com/containers/ocicrypt v1.1.7-0.20230115130455-e0cec6f7be0d
## explicit; go 1.16
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
github.com/containers/ocicrypt/config
@ -653,8 +658,8 @@ github.com/pmezard/go-difflib/difflib
# github.com/proglottis/gpgme v0.1.3
## explicit; go 1.11
github.com/proglottis/gpgme
# github.com/rivo/uniseg v0.2.0
## explicit; go 1.12
# github.com/rivo/uniseg v0.4.3
## explicit; go 1.18
github.com/rivo/uniseg
# github.com/rootless-containers/rootlesskit v1.1.0
## explicit; go 1.19
@ -736,7 +741,7 @@ github.com/vbauerster/mpb/v7
github.com/vbauerster/mpb/v7/cwriter
github.com/vbauerster/mpb/v7/decor
github.com/vbauerster/mpb/v7/internal
# github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
# github.com/vishvananda/netlink v1.2.1-beta.2
## explicit; go 1.12
github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
@ -863,7 +868,7 @@ golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
# google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6
# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.51.0