diff --git a/go.mod b/go.mod index 818d0cff..6cb96eca 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.21.3 require ( github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a - github.com/anchore/stereoscope v0.0.0-20230925132944-bf05af58eb44 - github.com/anchore/syft v0.94.0 + github.com/anchore/stereoscope v0.0.0-20231117203853-3610f4ef3e83 + github.com/anchore/syft v0.97.1 github.com/in-toto/in-toto-golang v0.9.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 @@ -13,16 +13,13 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect - github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/CycloneDX/cyclonedx-go v0.7.2 // indirect github.com/DataDog/zstd v1.4.5 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect + github.com/Microsoft/hcsshim v0.11.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/acobaugh/osrelease v0.1.0 // indirect github.com/acomagu/bufpipe v1.0.4 // indirect @@ -36,21 +33,22 @@ require ( github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect github.com/becheran/wildmatch-go v1.0.0 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/containerd v1.7.0 // indirect - github.com/containerd/continuity v0.3.0 // indirect + github.com/containerd/containerd v1.7.8 // indirect + github.com/containerd/continuity v0.4.2 // indirect github.com/containerd/fifo v1.1.0 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/containerd/ttrpc v1.2.1 // indirect - github.com/containerd/typeurl/v2 v2.1.0 // indirect + github.com/containerd/ttrpc v1.2.2 // indirect + github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/cli v24.0.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.6+incompatible // indirect + github.com/docker/docker v24.0.7+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect @@ -66,7 +64,7 @@ require ( github.com/github/go-spdx/v2 v2.2.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.9.0 // indirect + github.com/go-git/go-git/v5 v5.10.0 // indirect github.com/go-logr/logr v1.2.3 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-restruct/restruct v1.2.0-alpha // indirect @@ -78,14 +76,12 @@ require ( github.com/google/go-containerregistry v0.16.1 // indirect github.com/google/licensecheck v0.3.1 // indirect github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect - github.com/google/uuid v1.3.1 // indirect + github.com/google/uuid v1.4.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huandu/xstrings v1.3.3 // indirect github.com/iancoleman/strcase v0.3.0 // indirect - github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jinzhu/copier v0.4.0 // indirect @@ -97,21 +93,17 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.18 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mholt/archiver/v3 v3.5.1 // indirect github.com/microsoft/go-rustaudit v0.0.0-20220730194248-4b17361d90a5 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect github.com/nwaples/rardecode v1.1.0 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/opencontainers/runc v1.1.5 // indirect @@ -123,7 +115,6 @@ require ( github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/profile v1.7.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect github.com/saferwall/pe v1.4.7 // indirect github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect github.com/sassoftware/go-rpmutils v0.2.0 // indirect @@ -131,12 +122,11 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/shopspring/decimal v1.2.0 // indirect github.com/skeema/knownhosts v1.2.0 // indirect github.com/spdx/tools-golang v0.5.3 // indirect github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.16.0 // indirect @@ -158,18 +148,19 @@ require ( go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/goleak v1.2.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.17.0 // indirect + golang.org/x/crypto v0.15.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.18.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys 
v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/term v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.13.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/grpc v1.55.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index c90f0f44..6047f029 100644 --- a/go.sum +++ b/go.sum @@ -51,10 +51,10 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 h1:+vTEFqeoeur6XSq06bs+roX3YiT49gUniJK7Zky7Xjg= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -66,19 +66,13 @@ github.com/CycloneDX/cyclonedx-go v0.7.2/go.mod h1:K2bA+324+Og0X84fA8HhN2X066K7B github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod 
h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= -github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE= +github.com/Microsoft/hcsshim v0.11.1 h1:hJ3s7GbWlGK4YVV92sO88BQSyF4ZLVy7/awqOlPxFbA= +github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= @@ -106,10 +100,10 @@ github.com/anchore/go-testutils v0.0.0-20200925183923-d5f45b0d3c04 h1:VzprUTpc0v github.com/anchore/go-testutils v0.0.0-20200925183923-d5f45b0d3c04/go.mod h1:6dK64g27Qi1qGQZ67gFmBFvEHScy0/C8qhQhNe5B5pQ= github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501 h1:AV7qjwMcM4r8wFhJq3jLRztew3ywIyPTRapl2T1s9o8= github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501/go.mod h1:Blo6OgJNiYF41ufcgHKkbCKF2MDOMlrqhXv/ij6ocR4= -github.com/anchore/stereoscope v0.0.0-20230925132944-bf05af58eb44 h1:dKMvcpgqsRrX1ZWyqG53faVW+BahlaAO1RUEc7/rOjA= -github.com/anchore/stereoscope v0.0.0-20230925132944-bf05af58eb44/go.mod h1:RtbeDCho0pxkPqrB1QNf/Jlxfc9juLmtYZAf2UbpJfk= -github.com/anchore/syft v0.94.0 h1:bQKGqSjW1eaOU5nz/lIfmE7N3ePfSQr2PKSlx9Sts4k= -github.com/anchore/syft v0.94.0/go.mod h1:3P7bisGb54g2qJ7VA4jcmMnxJEnSwypr6hyNsoida7g= +github.com/anchore/stereoscope v0.0.0-20231117203853-3610f4ef3e83 h1:mxGIOmj+asEm8LUkPTG3/v0hi27WIlDVjiEVsUB9eqY= +github.com/anchore/stereoscope v0.0.0-20231117203853-3610f4ef3e83/go.mod h1:GKAnytSVV1hoqB5r5Gd9M5Ph3Rzqq0zPdEJesewjC2w= +github.com/anchore/syft v0.97.1 h1:c6ext6TQiUCjaqYgkG7rYzQOtzEHyxCD9OfbtYZkJnk= +github.com/anchore/syft v0.97.1/go.mod h1:jlxXQCqwYXyEp2UYsEoXWr4VJ59Bdxyfy8LyVl1CdaY= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= @@ -134,8 +128,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc= -github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradleyjkemp/cupaloy/v2 v2.8.0 
h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= @@ -169,23 +163,26 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AX github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= -github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48KftOFSM= +github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/containerd/ttrpc v1.2.1 h1:VWv/Rzx023TBLv4WQ+9WPXlBG/s3rsRjY3i9AJ2BJdE= -github.com/containerd/ttrpc v1.2.1/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= -github.com/containerd/typeurl/v2 v2.1.0 h1:yNAhJvbNEANt7ck48IlEGOxP7YAp6LLpGn5jZACDNIE= -github.com/containerd/typeurl/v2 v2.1.0/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= +github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= +github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= +github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -201,8 +198,8 @@ 
github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qe github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= -github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -263,10 +260,10 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= -github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= -github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ= +github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -373,10 +370,9 @@ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8I github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -421,17 +417,12 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -500,9 +491,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -514,9 +502,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 
h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -527,9 +512,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= @@ -552,8 +534,6 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -611,8 +591,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= @@ -643,8 +621,6 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal 
v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -663,13 +639,12 @@ github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY52 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -785,11 +760,10 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -828,8 +802,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -878,8 +852,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -997,16 +971,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1019,8 +993,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1196,8 +1170,10 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1225,8 +1201,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1241,8 +1217,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf 
v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1261,7 +1237,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1276,14 +1251,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= -modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= -modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o= -modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/sqlite v1.26.0 h1:SocQdLRSYlA8W99V8YH0NES75thx19d9sB/aFc4R8Lw= -modernc.org/sqlite v1.26.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU= +modernc.org/libc v1.29.0 h1:tTFRFq69YKCF2QyGNuRUQxKBm1uZZLubf6Cjh/pVHXs= +modernc.org/libc v1.29.0/go.mod h1:DaG/4Q3LRRdqpiLyP0C2m1B8ZMGkQ+cCgOIjEtQlYhQ= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8= +modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/scanner.go b/internal/scanner.go index e69f1aa7..ac06cdd9 100644 --- a/internal/scanner.go +++ b/internal/scanner.go @@ -19,8 +19,8 @@ import ( "os" "path/filepath" - "github.com/anchore/syft/syft" - 
"github.com/anchore/syft/syft/formats/spdxjson" + "github.com/anchore/syft/syft/format" + "github.com/anchore/syft/syft/format/spdxjson" intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/pkg/errors" ) @@ -38,7 +38,12 @@ func (s Scanner) Scan() error { return err } - output, err := syft.Encode(result, spdxjson.Format()) + enc, err := spdxjson.NewFormatEncoderWithConfig(spdxjson.DefaultEncoderConfig()) + if err != nil { + return err + } + + output, err := format.Encode(result, enc) if err != nil { return err } diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go index fdde08a7..adfeedf5 100644 --- a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -25,11 +25,10 @@ import ( "os" "path/filepath" "reflect" + "strconv" "strings" "time" "unsafe" - - securejoin "github.com/cyphar/filepath-securejoin" ) var ( @@ -389,11 +388,11 @@ func (f *ConsumeFuzzer) GetUint16() (uint16, error) { } func (f *ConsumeFuzzer) GetUint32() (uint32, error) { - i, err := f.GetInt() + u32, err := f.GetNBytes(4) if err != nil { - return uint32(0), err + return 0, err } - return uint32(i), nil + return binary.BigEndian.Uint32(u32), nil } func (f *ConsumeFuzzer) GetUint64() (uint64, error) { @@ -412,26 +411,27 @@ func (f *ConsumeFuzzer) GetUint64() (uint64, error) { } func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { - if f.position >= f.dataTotal { - return nil, errors.New("not enough bytes to create byte array") - } - length, err := f.GetUint32() + var length uint32 + var err error + length, err = f.GetUint32() if err != nil { return nil, errors.New("not enough bytes to create byte array") } - if f.position+length > MaxTotalLen { - return nil, errors.New("created too large a string") - } - byteBegin := f.position - 1 - if byteBegin >= f.dataTotal { - return nil, errors.New("not enough bytes to create byte array") - } + if length == 0 { - return nil, errors.New("zero-length is not supported") + length = 30 } - if byteBegin+length >= f.dataTotal { + bytesLeft := f.dataTotal - f.position + if bytesLeft <= 0 { return nil, errors.New("not enough bytes to create byte array") } + + // If the length is the same as bytes left, we will not overflow + // the remaining bytes. 
+ if length != bytesLeft { + length = length % bytesLeft + } + byteBegin := f.position if byteBegin+length < byteBegin { return nil, errors.New("numbers overflow") } @@ -482,6 +482,7 @@ func (f *ConsumeFuzzer) FuzzMap(m interface{}) error { } func returnTarBytes(buf []byte) ([]byte, error) { + return buf, nil // Count files var fileCounter int tr := tar.NewReader(bytes.NewReader(buf)) @@ -504,7 +505,8 @@ func returnTarBytes(buf []byte) ([]byte, error) { func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error { ind, err := f.GetInt() if err != nil { - return err + hdr.Format = tar.FormatGNU + //return nil } switch ind % 4 { case 0: @@ -565,71 +567,17 @@ func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error { return nil } -func tooSmallFileBody(length uint32) bool { - if length < 2 { - return true - } - if length < 4 { - return true - } - if length < 10 { - return true - } - if length < 100 { - return true - } - if length < 500 { - return true - } - if length < 1000 { - return true - } - if length < 2000 { - return true - } - if length < 4000 { - return true - } - if length < 8000 { - return true - } - if length < 16000 { - return true - } - if length < 32000 { - return true - } - if length < 64000 { - return true - } - if length < 128000 { - return true - } - if length < 264000 { - return true - } - return false -} - func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) { - length, err := f.GetUint32() + return f.GetBytes() + /*length, err := f.GetUint32() if err != nil { return nil, errors.New("not enough bytes to create byte array") } - shouldUseLargeFileBody, err := f.GetBool() - if err != nil { - return nil, errors.New("not enough bytes to check long file body") - } - - if shouldUseLargeFileBody && tooSmallFileBody(length) { - return nil, errors.New("File body was too small") - } - // A bit of optimization to attempt to create a file body // when we don't have as many bytes left as "length" remainingBytes := f.dataTotal - f.position - if remainingBytes == 0 { + if remainingBytes <= 0 { return nil, errors.New("created too large a string") } if f.position+length > MaxTotalLen { @@ -649,14 +597,15 @@ func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) { return nil, errors.New("numbers overflow") } f.position = byteBegin + length - return f.data[byteBegin:f.position], nil + return f.data[byteBegin:f.position], nil*/ } // getTarFileName is similar to GetString(), but creates string based // on the length of f.data to reduce the likelihood of overflowing // f.data. 
func (f *ConsumeFuzzer) getTarFilename() (string, error) { - length, err := f.GetUint32() + return f.GetString() + /*length, err := f.GetUint32() if err != nil { return "nil", errors.New("not enough bytes to create string") } @@ -664,14 +613,9 @@ func (f *ConsumeFuzzer) getTarFilename() (string, error) { // A bit of optimization to attempt to create a file name // when we don't have as many bytes left as "length" remainingBytes := f.dataTotal - f.position - if remainingBytes == 0 { + if remainingBytes <= 0 { return "nil", errors.New("created too large a string") } - if remainingBytes < 50 { - length = length % remainingBytes - } else if f.dataTotal < 500 { - length = length % f.dataTotal - } if f.position > MaxTotalLen { return "nil", errors.New("created too large a string") } @@ -686,7 +630,12 @@ func (f *ConsumeFuzzer) getTarFilename() (string, error) { return "nil", errors.New("numbers overflow") } f.position = byteBegin + length - return string(f.data[byteBegin:f.position]), nil + return string(f.data[byteBegin:f.position]), nil*/ +} + +type TarFile struct { + Hdr *tar.Header + Body []byte } // TarBytes returns valid bytes for a tar archive @@ -695,28 +644,101 @@ func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { if err != nil { return nil, err } + var tarFiles []*TarFile + tarFiles = make([]*TarFile, 0) + + const maxNoOfFiles = 100 + for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { + var filename string + var filebody []byte + var sec, nsec int + var err error + + filename, err = f.getTarFilename() + if err != nil { + var sb strings.Builder + sb.WriteString("file-") + sb.WriteString(strconv.Itoa(i)) + filename = sb.String() + } + filebody, err = f.createTarFileBody() + if err != nil { + var sb strings.Builder + sb.WriteString("filebody-") + sb.WriteString(strconv.Itoa(i)) + filebody = []byte(sb.String()) + } + + sec, err = f.GetInt() + if err != nil { + sec = 1672531200 // beginning of 2023 + } + nsec, err = f.GetInt() + if err != nil { + nsec = 1703980800 // end of 2023 + } + + hdr := &tar.Header{ + Name: filename, + Size: int64(len(filebody)), + Mode: 0o600, + ModTime: time.Unix(int64(sec), int64(nsec)), + } + if err := setTarHeaderTypeflag(hdr, f); err != nil { + return []byte(""), err + } + if err := setTarHeaderFormat(hdr, f); err != nil { + return []byte(""), err + } + tf := &TarFile{ + Hdr: hdr, + Body: filebody, + } + tarFiles = append(tarFiles, tf) + } var buf bytes.Buffer tw := tar.NewWriter(&buf) defer tw.Close() - const maxNoOfFiles = 1000 + for _, tf := range tarFiles { + tw.WriteHeader(tf.Hdr) + tw.Write(tf.Body) + } + return buf.Bytes(), nil +} + +// This is similar to TarBytes, but it returns a series of +// files instead of raw tar bytes. The advantage of this +// api is that it is cheaper in terms of cpu power to +// modify or check the files in the fuzzer with TarFiles() +// because it avoids creating a tar reader. 
+func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) { + numberOfFiles, err := f.GetInt() + if err != nil { + return nil, err + } + var tarFiles []*TarFile + tarFiles = make([]*TarFile, 0) + + const maxNoOfFiles = 100 for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { filename, err := f.getTarFilename() if err != nil { - return returnTarBytes(buf.Bytes()) + return tarFiles, err } filebody, err := f.createTarFileBody() if err != nil { - return returnTarBytes(buf.Bytes()) + return tarFiles, err } + sec, err := f.GetInt() if err != nil { - return returnTarBytes(buf.Bytes()) + return tarFiles, err } nsec, err := f.GetInt() if err != nil { - return returnTarBytes(buf.Bytes()) + return tarFiles, err } hdr := &tar.Header{ @@ -726,19 +748,18 @@ func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { ModTime: time.Unix(int64(sec), int64(nsec)), } if err := setTarHeaderTypeflag(hdr, f); err != nil { - return returnTarBytes(buf.Bytes()) + hdr.Typeflag = tar.TypeReg } if err := setTarHeaderFormat(hdr, f); err != nil { - return returnTarBytes(buf.Bytes()) - } - if err := tw.WriteHeader(hdr); err != nil { - return returnTarBytes(buf.Bytes()) + return tarFiles, err // should not happend } - if _, err := tw.Write(filebody); err != nil { - return returnTarBytes(buf.Bytes()) + tf := &TarFile{ + Hdr: hdr, + Body: filebody, } + tarFiles = append(tarFiles, tf) } - return buf.Bytes(), nil + return tarFiles, nil } // CreateFiles creates pseudo-random files in rootDir. @@ -767,10 +788,10 @@ func (f *ConsumeFuzzer) CreateFiles(rootDir string) error { return errors.New("could not get fileName") } } - fullFilePath, err := securejoin.SecureJoin(rootDir, fileName) - if err != nil { - return err + if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") { + continue } + fullFilePath := filepath.Join(rootDir, fileName) // Find the subdirectory of the file if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." 
{ @@ -778,20 +799,14 @@ func (f *ConsumeFuzzer) CreateFiles(rootDir string) error { if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") { continue } - dirPath, err := securejoin.SecureJoin(rootDir, subDir) - if err != nil { - continue - } + dirPath := filepath.Join(rootDir, subDir) if _, err := os.Stat(dirPath); os.IsNotExist(err) { err2 := os.MkdirAll(dirPath, 0o777) if err2 != nil { continue } } - fullFilePath, err = securejoin.SecureJoin(dirPath, fileName) - if err != nil { - continue - } + fullFilePath = filepath.Join(dirPath, fileName) } else { // Create symlink createSymlink, err := f.GetBool() diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go index 0e37c1fe..310c2289 100644 --- a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go @@ -29,7 +29,7 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []testing.I } func Short() bool { - panic(unsupportedApi("testing.Short")) + return false } func Verbose() bool { @@ -39,4 +39,4 @@ func Verbose() bool { type M struct {} func (m *M) Run() (code int) { panic("testing.M is not support in libFuzzer Mode") -} \ No newline at end of file +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml deleted file mode 100644 index 4025e01e..00000000 --- a/vendor/github.com/Masterminds/goutils/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -script: - - go test -v - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md deleted file mode 100644 index d700ec47..00000000 --- a/vendor/github.com/Masterminds/goutils/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# 1.0.1 (2017-05-31) - -## Fixed -- #21: Fix generation of alphanumeric strings (thanks @dbarranco) - -# 1.0.0 (2014-04-30) - -- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md deleted file mode 100644 index 163ffe72..00000000 --- a/vendor/github.com/Masterminds/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. - - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. - -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml deleted file mode 100644 index 657564a8..00000000 --- a/vendor/github.com/Masterminds/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go deleted file mode 100644 index 8dbd9248..00000000 --- a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "unicode" -) - -/* -CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNonAlphaNumeric(count int) (string, error) { - return CryptoRandomAlphaNumericCustom(count, false, false) -} - -/* -CryptoRandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAscii(count int) (string, error) { - return CryptoRandom(count, 32, 127, false, false) -} - -/* -CryptoRandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, false, true) -} - -/* -CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphabetic(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, false) -} - -/* -CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, true) -} - -/* -CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) 
-*/ -func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return CryptoRandom(count, 0, 0, letters, numbers) -} - -/* -CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(getCryptoRandomInt(gap) + int64(start)) - } else { - ch = chars[getCryptoRandomInt(gap)+int64(start)] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + getCryptoRandomInt(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + getCryptoRandomInt(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} - -func getCryptoRandomInt(count int) int64 { - nBig, err := 
rand.Int(rand.Reader, big.NewInt(int64(count))) - if err != nil { - panic(err) - } - return nBig.Int64() -} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go deleted file mode 100644 index 27267023..00000000 --- a/vendor/github.com/Masterminds/goutils/randomstringutils.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters. - -Parameters: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
-*/ -func RandomAlphaNumeric(count int) (string, error) { - return Random(count, 0, 0, true, true) -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using default source of randomness. -This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness. 
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go deleted file mode 100644 index 741bb530..00000000 --- a/vendor/github.com/Masterminds/goutils/stringutils.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
- -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." - if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
- -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} - -// Returns either the passed in string, or if the string is empty, the value of defaultStr. -func DefaultString(str string, defaultStr string) string { - if IsEmpty(str) { - return defaultStr - } - return str -} - -// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. -func DefaultIfBlank(str string, defaultStr string) string { - if IsBlank(str) { - return defaultStr - } - return str -} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go deleted file mode 100644 index 034cad8e..00000000 --- a/vendor/github.com/Masterminds/goutils/wordutils.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. 
-The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. - -Example: - - package main - - import ( - "fmt" - "github.com/aokoli/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - } -*/ -package goutils - -import ( - "bytes" - "strings" - "unicode" -) - -// VERSION indicates the current version of goutils -const VERSION = "1.0.0" - -/* -Wrap wraps a single line of text, identifying words by ' '. -New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - -Returns: - a line with newlines inserted -*/ -func Wrap(str string, wrapLength int) string { - return WrapCustom(str, wrapLength, "", false) -} - -/* -WrapCustom wraps a single line of text, identifying words by ' '. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - newLineStr - the string to insert for a new line, "" uses '\n' - wrapLongWords - true if long words (such as URLs) should be wrapped - -Returns: - a line with newlines inserted -*/ -func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { - - if str == "" { - return "" - } - if newLineStr == "" { - newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons - } - if wrapLength < 1 { - wrapLength = 1 - } - - inputLineLength := len(str) - offset := 0 - - var wrappedLine bytes.Buffer - - for inputLineLength-offset > wrapLength { - - if rune(str[offset]) == ' ' { - offset++ - continue - } - - end := wrapLength + offset + 1 - spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset - - if spaceToWrapAt >= offset { - // normal word (not longer than wrapLength) - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - - } else { - // long word or URL - if wrapLongWords { - end := wrapLength + offset - // long words are wrapped one line at a time - wrappedLine.WriteString(str[offset:end]) - wrappedLine.WriteString(newLineStr) - offset += wrapLength - } else { - // long words aren't wrapped, just extended beyond limit - end := wrapLength + offset - index := strings.IndexRune(str[end:len(str)], ' ') - if index == -1 { - wrappedLine.WriteString(str[offset:len(str)]) - offset = inputLineLength - } else { - spaceToWrapAt = index + end - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - } - } - } - } - - wrappedLine.WriteString(str[offset:len(str)]) - - return wrappedLine.String() - -} - -/* -Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
-To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -The delimiters represent a set of characters understood to separate words. The first string character -and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - capitalized string -*/ -func Capitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - capitalizeNext := true - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - capitalizeNext = true - } else if capitalizeNext { - buffer[i] = unicode.ToTitle(ch) - capitalizeNext = false - } - } - return string(buffer) - -} - -/* -CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - capitalized string -*/ -func CapitalizeFully(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - str = strings.ToLower(str) - return Capitalize(str, delimiters...) -} - -/* -Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to uncapitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - uncapitalized string -*/ -func Uncapitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - uncapitalizeNext = true - } else if uncapitalizeNext { - buffer[i] = unicode.ToLower(ch) - uncapitalizeNext = false - } - } - return string(buffer) -} - -/* -SwapCase swaps the case of a string using a word based algorithm. 
- -Conversion algorithm: - - Upper case character converts to Lower case - Title case character converts to Lower case - Lower case character after Whitespace or at start converts to Title case - Other Lower case character converts to Upper case - Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to swap case - -Returns: - the changed string -*/ -func SwapCase(str string) string { - if str == "" { - return str - } - buffer := []rune(str) - - whitespace := true - - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if unicode.IsUpper(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsTitle(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsLower(ch) { - if whitespace { - buffer[i] = unicode.ToTitle(ch) - whitespace = false - } else { - buffer[i] = unicode.ToUpper(ch) - } - } else { - whitespace = unicode.IsSpace(ch) - } - } - return string(buffer) -} - -/* -Initials extracts the initial letters from each word in the string. The first letter of the string and all first -letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. - -Parameters: - str - the string to get initials from - delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter -Returns: - string of initial letters -*/ -func Initials(str string, delimiters ...rune) string { - if str == "" { - return str - } - if delimiters != nil && len(delimiters) == 0 { - return "" - } - strLen := len(str) - var buf bytes.Buffer - lastWasGap := true - for i := 0; i < strLen; i++ { - ch := rune(str[i]) - - if isDelimiter(ch, delimiters...) 
{ - lastWasGap = true - } else if lastWasGap { - buf.WriteRune(ch) - lastWasGap = false - } - } - return buf.String() -} - -// private function (lower case func name) -func isDelimiter(ch rune, delimiters ...rune) bool { - if delimiters == nil { - return unicode.IsSpace(ch) - } - for _, delimiter := range delimiters { - if ch == delimiter { - return true - } - } - return false -} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore deleted file mode 100644 index 6b061e61..00000000 --- a/vendor/github.com/Masterminds/semver/v3/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml deleted file mode 100644 index c87d1c4b..00000000 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -run: - deadline: 2m - -linters: - disable-all: true - enable: - - misspell - - structcheck - - govet - - staticcheck - - deadcode - - errcheck - - varcheck - - unparam - - ineffassign - - nakedret - - gocyclo - - dupl - - goimports - - revive - - gosec - - gosimple - - typecheck - - unused - -linters-settings: - gofmt: - simplify: true - dupl: - threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md deleted file mode 100644 index f1262642..00000000 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ /dev/null @@ -1,214 +0,0 @@ -# Changelog - -## 3.2.0 (2022-11-28) - -### Added - -- #190: Added text marshaling and unmarshaling -- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) -- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) -- #179: Added New() version constructor (thanks @kazhuravlev) - -### Changed - -- #182/#183: Updated CI testing setup - -### Fixed - -- #186: Fixing issue where validation of constraint section gave false positives -- #176: Fix constraints check with *-0 (thanks @mtt0) -- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) -- #161: Fixed godoc (thanks @afirth) - -## 3.1.1 (2020-11-23) - -### Fixed - -- #158: Fixed issue with generated regex operation order that could cause problem - -## 3.1.0 (2020-04-15) - -### Added - -- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) - -### Changed - -- #148: More accurate validation messages on constraints - -## 3.0.3 (2019-12-13) - -### Fixed - -- #141: Fixed issue with <= comparison - -## 3.0.2 (2019-11-14) - -### Fixed - -- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) - -## 3.0.1 (2019-09-13) - -### Fixed - -- #125: Fixes issue with module path for v3 - -## 3.0.0 (2019-09-12) - -This is a major release of the semver package which includes API changes. The Go -API is compatible with ^1. The Go API was not changed because many people are using -`go get` without Go modules for their applications and API breaking changes cause -errors which we have or would need to support. - -The changes in this release are the handling based on the data passed into the -functions. These are described in the added and changed sections below. - -### Added - -- StrictNewVersion function. This is similar to NewVersion but will return an - error if the version passed in is not a strict semantic version. 
For example, - 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly - speaking semantic versions. This function is faster, performs fewer operations, - and uses fewer allocations than NewVersion. -- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. - The Makefile contains the operations used. For more information on you can start - on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing -- Now using Go modules - -### Changed - -- NewVersion has proper prerelease and metadata validation with error messages - to signal an issue with either of them -- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the - version is >=1 the ^ ranges works the same as v1. For major versions of 0 the - rules have changed. The minor version is treated as the stable version unless - a patch is specified and then it is equivalent to =. One difference from npm/js - is that prereleases there are only to a specific version (e.g. 1.2.3). - Prereleases here look over multiple versions and follow semantic version - ordering rules. This pattern now follows along with the expected and requested - handling of this packaged by numerous users. - -## 1.5.0 (2019-09-11) - -### Added - -- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) - -### Changed - -- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) -- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) -- #72: Adding docs comment pointing to vert for a cli -- #71: Update the docs on pre-release comparator handling -- #89: Test with new go versions (thanks @thedevsaddam) -- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) - -### Fixed - -- #78: Fix unchecked error in example code (thanks @ravron) -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -- #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num -- #109: Fixed where Validate sometimes returns wrong message on error - -## 1.4.2 (2018-04-10) - -### Changed - -- #72: Updated the docs to point to vert for a console appliaction -- #71: Update the docs on pre-release comparator handling - -### Fixed - -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case - -## 1.4.1 (2018-04-02) - -### Fixed - -- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) - -## 1.4.0 (2017-10-04) - -### Changed - -- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) - -## 1.3.1 (2017-07-10) - -### Fixed - -- Fixed #57: number comparisons in prerelease sometimes inaccurate - -## 1.3.0 (2017-05-02) - -### Added - -- #45: Added json (un)marshaling support (thanks @mh-cbon) -- Stability marker. See https://masterminds.github.io/stability/ - -### Fixed - -- #51: Fix handling of single digit tilde constraint (thanks @dgodd) - -### Changed - -- #55: The godoc icon moved from png to svg - -## 1.2.3 (2017-04-03) - -### Fixed - -- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * - -## Release 1.2.2 (2016-12-13) - -### Fixed - -- #34: Fixed issue where hyphen range was not working with pre-release parsing. - -## Release 1.2.1 (2016-11-28) - -### Fixed - -- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" - properly. 
- -## Release 1.2.0 (2016-11-04) - -### Added - -- #20: Added MustParse function for versions (thanks @adamreese) -- #15: Added increment methods on versions (thanks @mh-cbon) - -### Fixed - -- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and - might not satisfy the intended compatibility. The change here ignores pre-releases - on constraint checks (e.g., ~ or ^) when a pre-release is not part of the - constraint. For example, `^1.2.3` will ignore pre-releases while - `^1.2.3-alpha` will include them. - -## Release 1.1.1 (2016-06-30) - -### Changed - -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) -- Updated Go Report Card URL to new location -- Updated Readme to add code snippet formatting (thanks @mh-cbon) -- Updating tagging to v[SemVer] structure for compatibility with other tools. - -## Release 1.1.0 (2016-03-11) - -- Issue #2: Implemented validation to provide reasons a versions failed a - constraint. - -## Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. - -## Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt deleted file mode 100644 index 9ff7da9c..00000000 --- a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014-2019, Matt Butcher and Matt Farina - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile deleted file mode 100644 index eac19178..00000000 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -GOPATH=$(shell go env GOPATH) -GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz - -.PHONY: lint -lint: $(GOLANGCI_LINT) - @echo "==> Linting codebase" - @$(GOLANGCI_LINT) run - -.PHONY: test -test: - @echo "==> Running tests" - GO111MODULE=on go test -v - -.PHONY: test-cover -test-cover: - @echo "==> Running Tests with coverage" - GO111MODULE=on go test -cover . - -.PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz - -$(GOLANGCI_LINT): - # Install golangci-lint. 
The configuration for it is in the .golangci.yml - # file in the root of the repository - echo ${GOPATH} - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md deleted file mode 100644 index d8f54dcb..00000000 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ /dev/null @@ -1,244 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Stability: -Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) -[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) -[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) - -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - -## Package Versions - -There are three major versions fo the `semver` package. - -* 3.x.x is the new stable and active version. This version is focused on constraint - compatibility for range handling in other tools from other languages. It has - a similar API to the v1 releases. The development of this version is on the master - branch. The documentation for this version is below. -* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are - no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). - There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). - -## Parsing Semantic Versions - -There are two functions that can parse semantic versions. The `StrictNewVersion` -function only parses valid version 2 semantic versions as outlined in the -specification. The `NewVersion` function attempts to coerce a version into a -semantic version and parse it. For example, if there is a leading v or a version -listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid -semantic version (e.g., 1.2.0). In both cases a `Version` object is returned -that can be sorted, compared, and used in constraints. - -When parsing a version an error is returned if there is an issue parsing the -version. 
For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -The version object has methods to get the parts of the version, compare it to -other versions, convert the version back into a string, and get the original -string. Getting the original string is useful if the semantic version was coerced -into a valid form. - -## Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. -For example, - -```go -raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} -vs := make([]*semver.Version, len(raw)) -for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v -} - -sort.Sort(semver.Collection(vs)) -``` - -## Checking Version Constraints - -There are two methods for comparing versions. One uses comparison methods on -`Version` instances and the other uses `Constraints`. There are some important -differences to notes between these two methods of comparison. - -1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases - within the comparison. It will provide an answer that is valid with the - comparison section of the spec at https://semver.org/#spec-item-11 -2. When constraint checking is used for checks or validation it will follow a - different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the - ranges does not include one. If you want to have it include pre-releases a - simple solution is to include `-0` in your range. -3. Constraint ranges can have some complex rules including the shorthand use of - ~ and ^. For more details on those see the options below. - -There are differences between the two methods or checking versions because the -comparison methods on `Version` follow the specification while comparison ranges -are not part of the specification. Different packages and tools have taken it -upon themselves to come up with range rules. This has resulted in differences. -For example, npm/js and Cargo/Rust follow similar patterns while PHP has a -different pattern for ^. The comparison features in this package follow the -npm/js and Cargo/Rust lead because applications using it have followed similar -patters with their versions. - -Checking a version against version constraints is one of the most featureful -parts of the package. - -```go -c, err := semver.NewConstraint(">= 1.2.3") -if err != nil { - // Handle constraint not being parsable. -} - -v, err := semver.NewVersion("1.3") -if err != nil { - // Handle version not being parsable. -} -// Check if the version meets the constraints. The a variable will be true. -a := c.Check(v) -``` - -### Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of space or comma separated AND comparisons. These are then separated by || (OR) -comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. 
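To make the compound-constraint syntax concrete, here is a small sketch using the same constraint and version constructors shown above (the version strings are chosen only for illustration):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Space-separated comparisons form an AND group; "||" separates OR groups.
	c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
	if err != nil {
		panic(err)
	}

	for _, raw := range []string{"2.5.0", "3.1.4", "4.3.0"} {
		v, err := semver.NewVersion(raw)
		if err != nil {
			panic(err)
		}
		fmt.Println(raw, c.Check(v))
	}
	// Prints:
	// 2.5.0 true   (satisfies >= 1.2 and < 3.0.0)
	// 3.1.4 false  (fails < 3.0.0 and is below 4.2.3)
	// 4.3.0 true   (satisfies >= 4.2.3)
}
```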
- -The basic comparisons are: - -* `=`: equal (aliased to no operator) -* `!=`: not equal -* `>`: greater than -* `<`: less than -* `>=`: greater than or equal to -* `<=`: less than or equal to - -### Working With Prerelease Versions - -Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of prereleases include -development, alpha, beta, and release candidate releases. A prerelease may be -a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precedence, prereleases come before their associated releases. In this -example `1.2.3-beta.1 < 1.2.3`. - -According to the Semantic Version specification prereleases may not be -API compliant with their release counterpart. It says, - -> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. - -SemVer comparisons using constraints without a prerelease comparator will skip -prerelease versions. For example, `>=1.2.3` will skip prereleases when looking -at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. - -The reason for the `0` as a pre-release version in the example comparison is -because pre-releases can only contain ASCII alphanumerics and hyphens (along with -`.` separators), per the spec. Sorting happens in ASCII sort order, again per the -spec. The lowest character is a `0` in ASCII sort order -(see an [ASCII Table](http://www.asciitable.com/)) - -Understanding ASCII sort ordering is important because A-Z comes before a-z. That -means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case -sensitivity doesn't apply here. This is due to ASCII sort ordering which is what -the spec specifies. - -### Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - -* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` -* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` - -### Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the patch level comparison (see tilde below). For example, - -* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `< 3` -* `*` is equivalent to `>= 0.0.0` - -### Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - -* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -* `~1` is equivalent to `>= 1, < 2` -* `~2.3` is equivalent to `>= 2.3, < 2.4` -* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `~1.x` is equivalent to `>= 1, < 2` - -### Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes once a stable -(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts -as the API stability level. This is useful when comparisons of API versions as a -major change is API breaking. 
For example, - -* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -* `^2.3` is equivalent to `>= 2.3, < 3` -* `^2.x` is equivalent to `>= 2.0.0, < 3` -* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` -* `^0.2` is equivalent to `>=0.2.0 <0.3.0` -* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` -* `^0.0` is equivalent to `>=0.0.0 <0.1.0` -* `^0` is equivalent to `>=0.0.0 <1.0.0` - -## Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - -```go -c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") -if err != nil { - // Handle constraint not being parseable. -} - -v, err := semver.NewVersion("1.3") -if err != nil { - // Handle version not being parseable. -} - -// Validate a version against a constraint. -a, msgs := c.Validate(v) -// a is false -for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" -} -``` - -## Contribute - -If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go deleted file mode 100644 index a7823589..00000000 --- a/vendor/github.com/Masterminds/semver/v3/collection.go +++ /dev/null @@ -1,24 +0,0 @@ -package semver - -// Collection is a collection of Version instances and implements the sort -// interface. See the sort package for more details. -// https://golang.org/pkg/sort/ -type Collection []*Version - -// Len returns the length of a collection. The number of Version instances -// on the slice. -func (c Collection) Len() int { - return len(c) -} - -// Less is needed for the sort interface to compare two Version objects on the -// slice. If checks if one is less than the other. -func (c Collection) Less(i, j int) bool { - return c[i].LessThan(c[j]) -} - -// Swap is needed for the sort interface to replace the Version objects -// at two different positions in the slice. -func (c Collection) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go deleted file mode 100644 index 203072e4..00000000 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ /dev/null @@ -1,594 +0,0 @@ -package semver - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strings" -) - -// Constraints is one or more constraint that a semantic version can be -// checked against. -type Constraints struct { - constraints [][]*constraint -} - -// NewConstraint returns a Constraints instance that a Version instance can -// be checked against. If there is a parse error it will be returned. -func NewConstraint(c string) (*Constraints, error) { - - // Rewrite - ranges into a comparison operation. 
- c = rewriteRange(c) - - ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) - for k, v := range ors { - - // TODO: Find a way to validate and fetch all the constraints in a simpler form - - // Validate the segment - if !validConstraintRegex.MatchString(v) { - return nil, fmt.Errorf("improper constraint: %s", v) - } - - cs := findConstraintRegex.FindAllString(v, -1) - if cs == nil { - cs = append(cs, v) - } - result := make([]*constraint, len(cs)) - for i, s := range cs { - pc, err := parseConstraint(s) - if err != nil { - return nil, err - } - - result[i] = pc - } - or[k] = result - } - - o := &Constraints{constraints: or} - return o, nil -} - -// Check tests if a version satisfies the constraints. -func (cs Constraints) Check(v *Version) bool { - // TODO(mattfarina): For v4 of this library consolidate the Check and Validate - // functions as the underlying functions make that possible now. - // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if check, _ := c.check(v); !check { - joy = false - break - } - } - - if joy { - return true - } - } - - return false -} - -// Validate checks if a version satisfies a constraint. If not a slice of -// reasons for the failure are returned in addition to a bool. -func (cs Constraints) Validate(v *Version) (bool, []error) { - // loop over the ORs and check the inner ANDs - var e []error - - // Capture the prerelease message only once. When it happens the first time - // this var is marked - var prerelesase bool - for _, o := range cs.constraints { - joy := true - for _, c := range o { - // Before running the check handle the case there the version is - // a prerelease and the check is not searching for prereleases. - if c.con.pre == "" && v.pre != "" { - if !prerelesase { - em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - e = append(e, em) - prerelesase = true - } - joy = false - - } else { - - if _, err := c.check(v); err != nil { - e = append(e, err) - joy = false - } - } - } - - if joy { - return true, []error{} - } - } - - return false, e -} - -func (cs Constraints) String() string { - buf := make([]string, len(cs.constraints)) - var tmp bytes.Buffer - - for k, v := range cs.constraints { - tmp.Reset() - vlen := len(v) - for kk, c := range v { - tmp.WriteString(c.string()) - - // Space separate the AND conditions - if vlen > 1 && kk < vlen-1 { - tmp.WriteString(" ") - } - } - buf[k] = tmp.String() - } - - return strings.Join(buf, " || ") -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (cs *Constraints) UnmarshalText(text []byte) error { - temp, err := NewConstraint(string(text)) - if err != nil { - return err - } - - *cs = *temp - - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. 
-func (cs Constraints) MarshalText() ([]byte, error) { - return []byte(cs.String()), nil -} - -var constraintOps map[string]cfunc -var constraintRegex *regexp.Regexp -var constraintRangeRegex *regexp.Regexp - -// Used to find individual constraints within a multi-constraint string -var findConstraintRegex *regexp.Regexp - -// Used to validate an segment of ANDs is valid -var validConstraintRegex *regexp.Regexp - -const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -func init() { - constraintOps = map[string]cfunc{ - "": constraintTildeOrEqual, - "=": constraintTildeOrEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "=>": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "=<": constraintLessThanEqual, - "~": constraintTilde, - "~>": constraintTilde, - "^": constraintCaret, - } - - ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` - - constraintRegex = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - ops, - cvRegex)) - - constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( - `\s*(%s)\s+-\s+(%s)\s*`, - cvRegex, cvRegex)) - - findConstraintRegex = regexp.MustCompile(fmt.Sprintf( - `(%s)\s*(%s)`, - ops, - cvRegex)) - - // The first time a constraint shows up will look slightly different from - // future times it shows up due to a leading space or comma in a given - // string. - validConstraintRegex = regexp.MustCompile(fmt.Sprintf( - `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, - ops, - cvRegex, - ops, - cvRegex)) -} - -// An individual constraint -type constraint struct { - // The version used in the constraint check. For example, if a constraint - // is '<= 2.0.0' the con a version instance representing 2.0.0. - con *Version - - // The original parsed version (e.g., 4.x from != 4.x) - orig string - - // The original operator for the constraint - origfunc string - - // When an x is used as part of the version (e.g., 1.x) - minorDirty bool - dirty bool - patchDirty bool -} - -// Check if a version meets the constraint -func (c *constraint) check(v *Version) (bool, error) { - return constraintOps[c.origfunc](v, c) -} - -// String prints an individual constraint into a string -func (c *constraint) string() string { - return c.origfunc + c.orig -} - -type cfunc func(v *Version, c *constraint) (bool, error) - -func parseConstraint(c string) (*constraint, error) { - if len(c) > 0 { - m := constraintRegex.FindStringSubmatch(c) - if m == nil { - return nil, fmt.Errorf("improper constraint: %s", c) - } - - cs := &constraint{ - orig: m[2], - origfunc: m[1], - } - - ver := m[2] - minorDirty := false - patchDirty := false - dirty := false - if isX(m[3]) || m[3] == "" { - ver = fmt.Sprintf("0.0.0%s", m[6]) - dirty = true - } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { - minorDirty = true - dirty = true - ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) - } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { - dirty = true - patchDirty = true - ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) - } - - con, err := NewVersion(ver) - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. 
- return nil, errors.New("constraint Parser Error") - } - - cs.con = con - cs.minorDirty = minorDirty - cs.patchDirty = patchDirty - cs.dirty = dirty - - return cs, nil - } - - // The rest is the special case where an empty string was passed in which - // is equivalent to * or >=0.0.0 - con, err := StrictNewVersion("0.0.0") - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. - return nil, errors.New("constraint Parser Error") - } - - cs := &constraint{ - con: con, - orig: c, - origfunc: "", - minorDirty: false, - patchDirty: false, - dirty: true, - } - return cs, nil -} - -// Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if c.con.Major() != v.Major() { - return true, nil - } - if c.con.Minor() != v.Minor() && !c.minorDirty { - return true, nil - } else if c.minorDirty { - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } else if c.con.Patch() != v.Patch() && !c.patchDirty { - return true, nil - } else if c.patchDirty { - // Need to handle prereleases if present - if v.Prerelease() != "" || c.con.Prerelease() != "" { - eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - } - - eq := v.Equal(c.con) - if eq { - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - - return true, nil -} - -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - var eq bool - - if !c.dirty { - eq = v.Compare(c.con) == 1 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) - } - - if v.Major() > c.con.Major() { - return true, nil - } else if v.Major() < c.con.Major() { - return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) - } else if c.minorDirty { - // This is a range case such as >11. When the version is something like - // 11.1.0 is it not > 11. For that we would need 12 or higher - return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) - } else if c.patchDirty { - // This is for ranges such as >11.1. A version of 11.1.1 is not greater - // which one of 11.2.1 is greater - eq = v.Minor() > c.con.Minor() - if eq { - return true, nil - } - return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) - } - - // If we have gotten here we are not comparing pre-preleases and can use the - // Compare function to accomplish that. 
- eq = v.Compare(c.con) == 1 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) -} - -func constraintLessThan(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - eq := v.Compare(c.con) < 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) -} - -func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - eq := v.Compare(c.con) >= 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is less than %s", v, c.orig) -} - -func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - var eq bool - - if !c.dirty { - eq = v.Compare(c.con) <= 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } - - if v.Major() > c.con.Major() { - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } - - return true, nil -} - -// ~*, ~>* --> >= 0.0.0 (any) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if v.LessThan(c.con) { - return false, fmt.Errorf("%s is less than %s", v, c.orig) - } - - // ~0.0.0 is a special case where all constraints are accepted. It's - // equivalent to >= 0.0.0. - if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && - !c.minorDirty && !c.patchDirty { - return true, nil - } - - if v.Major() != c.con.Major() { - return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) - } - - if v.Minor() != c.con.Minor() && !c.minorDirty { - return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) - } - - return true, nil -} - -// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise -// it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if c.dirty { - return constraintTilde(v, c) - } - - eq := v.Equal(c.con) - if eq { - return true, nil - } - - return false, fmt.Errorf("%s is not equal to %s", v, c.orig) -} - -// ^* --> (any) -// ^1.2.3 --> >=1.2.3 <2.0.0 -// ^1.2 --> >=1.2.0 <2.0.0 -// ^1 --> >=1.0.0 <2.0.0 -// ^0.2.3 --> >=0.2.3 <0.3.0 -// ^0.2 --> >=0.2.0 <0.3.0 -// ^0.0.3 --> >=0.0.3 <0.0.4 -// ^0.0 --> >=0.0.0 <0.1.0 -// ^0 --> >=0.0.0 <1.0.0 -func constraintCaret(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - // This less than handles prereleases - if v.LessThan(c.con) { - return false, fmt.Errorf("%s is less than %s", v, c.orig) - } - - var eq bool - - // ^ when the major > 0 is >=x.y.z < x+1 - if c.con.Major() > 0 || c.minorDirty { - - // ^ has to be within a major range for > 0. Everything less than was - // filtered out with the LessThan call above. This filters out those - // that greater but not within the same major range. - eq = v.Major() == c.con.Major() - if eq { - return true, nil - } - return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) - } - - // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 - if c.con.Major() == 0 && v.Major() > 0 { - return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) - } - // If the con Minor is > 0 it is not dirty - if c.con.Minor() > 0 || c.patchDirty { - eq = v.Minor() == c.con.Minor() - if eq { - return true, nil - } - return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) - } - // ^ when the minor is 0 and minor > 0 is =0.0.z - if c.con.Minor() == 0 && v.Minor() > 0 { - return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) - } - - // At this point the major is 0 and the minor is 0 and not dirty. The patch - // is not dirty so we need to check if they are equal. If they are not equal - eq = c.con.Patch() == v.Patch() - if eq { - return true, nil - } - return false, fmt.Errorf("%s does not equal %s. 
Expect version and constraint to equal when major and minor versions are 0", v, c.orig) -} - -func isX(x string) bool { - switch x { - case "x", "*", "X": - return true - default: - return false - } -} - -func rewriteRange(i string) string { - m := constraintRangeRegex.FindAllStringSubmatch(i, -1) - if m == nil { - return i - } - o := i - for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) - o = strings.Replace(o, v[0], t, 1) - } - - return o -} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go deleted file mode 100644 index 74f97caa..00000000 --- a/vendor/github.com/Masterminds/semver/v3/doc.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. - -Specifically it provides the ability to: - - - Parse semantic versions - - Sort semantic versions - - Check if a semantic version fits within a set of constraints - - Optionally work with a `v` prefix - -# Parsing Semantic Versions - -There are two functions that can parse semantic versions. The `StrictNewVersion` -function only parses valid version 2 semantic versions as outlined in the -specification. The `NewVersion` function attempts to coerce a version into a -semantic version and parse it. For example, if there is a leading v or a version -listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid -semantic version (e.g., 1.2.0). In both cases a `Version` object is returned -that can be sorted, compared, and used in constraints. - -When parsing a version an optional error can be returned if there is an issue -parsing the version. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+b345") - -The version object has methods to get the parts of the version, compare it to -other versions, convert the version back into a string, and get the original -string. For more details please see the documentation -at https://godoc.org/github.com/Masterminds/semver. - -# Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. -For example, - - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) - -# Checking Version Constraints and Comparing Versions - -There are two methods for comparing versions. One uses comparison methods on -`Version` instances and the other is using Constraints. There are some important -differences to notes between these two methods of comparison. - - 1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases - within the comparison. It will provide an answer valid with the comparison - spec section at https://semver.org/#spec-item-11 - 2. When constraint checking is used for checks or validation it will follow a - different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the - ranges does not include on. If you want to have it include pre-releases a - simple solution is to include `-0` in your range. - 3. Constraint ranges can have some complex rules including the shorthard use of - ~ and ^. For more details on those see the options below. 
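The prerelease rules in points 1 and 2 above are the easiest to trip over. A minimal sketch of the difference, assuming the illustrative version strings shown here and using only the exported API documented in this package (`MustParse`, `GreaterThan`, `NewConstraint`, `Check`):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.3.0-beta.1")

	// Direct comparison follows the SemVer spec and always considers
	// prereleases: 1.3.0-beta.1 is greater than 1.2.3.
	fmt.Println(v.GreaterThan(semver.MustParse("1.2.3"))) // true

	// Constraint checking follows npm/Cargo-style range rules: a plain
	// release-only range never matches a prerelease version.
	c, _ := semver.NewConstraint(">= 1.2.3")
	fmt.Println(c.Check(v)) // false

	// Adding -0 to the lower bound opts the range into prereleases.
	c2, _ := semver.NewConstraint(">= 1.2.3-0")
	fmt.Println(c2.Check(v)) // true
}
```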
- -There are differences between the two methods or checking versions because the -comparison methods on `Version` follow the specification while comparison ranges -are not part of the specification. Different packages and tools have taken it -upon themselves to come up with range rules. This has resulted in differences. -For example, npm/js and Cargo/Rust follow similar patterns which PHP has a -different pattern for ^. The comparison features in this package follow the -npm/js and Cargo/Rust lead because applications using it have followed similar -patters with their versions. - -Checking a version against version constraints is one of the most featureful -parts of the package. - - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parsable. - } - - v, err := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parsable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) - -# Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma or space separated AND comparisons. These are then separated by || (OR) -comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. This can also be written as -`">= 1.2, < 3.0.0 || >= 4.2.3"` - -The basic comparisons are: - - - `=`: equal (aliased to no operator) - - `!=`: not equal - - `>`: greater than - - `<`: less than - - `>=`: greater than or equal to - - `<=`: less than or equal to - -# Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - - - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` - - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` - -# Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the tilde operation. For example, - - - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` - - `>= 1.2.x` is equivalent to `>= 1.2.0` - - `<= 2.x` is equivalent to `<= 3` - - `*` is equivalent to `>= 0.0.0` - -Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - - - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` - - `~1` is equivalent to `>= 1, < 2` - - `~2.3` is equivalent to `>= 2.3 < 2.4` - - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` - - `~1.x` is equivalent to `>= 1 < 2` - -Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes once a stable -(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts -as the API stability level. This is useful when comparisons of API versions as a -major change is API breaking. 
For example, - - - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` - - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` - - `^2.3` is equivalent to `>= 2.3, < 3` - - `^2.x` is equivalent to `>= 2.0.0, < 3` - - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` - - `^0.2` is equivalent to `>=0.2.0 <0.3.0` - - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` - - `^0.0` is equivalent to `>=0.0.0 <0.1.0` - - `^0` is equivalent to `>=0.0.0 <1.0.0` - -# Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } -*/ -package semver diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go deleted file mode 100644 index a242ad70..00000000 --- a/vendor/github.com/Masterminds/semver/v3/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - d := string(data) - - // Test NewVersion - _, _ = NewVersion(d) - - // Test StrictNewVersion - _, _ = StrictNewVersion(d) - - // Test NewConstraint - _, _ = NewConstraint(d) - - // The return value should be 0 normally, 1 if the priority in future tests - // should be increased, and -1 if future tests should skip passing in that - // data. We do not have a reason to change priority so 0 is always returned. - // There are example tests that do this. - return 0 -} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go deleted file mode 100644 index 7c4bed33..00000000 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ /dev/null @@ -1,639 +0,0 @@ -package semver - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -// The compiled version of the regex created at init() is cached here so it -// only needs to be created once. -var versionRegex *regexp.Regexp - -var ( - // ErrInvalidSemVer is returned a version is found to be invalid when - // being parsed. - ErrInvalidSemVer = errors.New("Invalid Semantic Version") - - // ErrEmptyString is returned when an empty string is passed in for parsing. - ErrEmptyString = errors.New("Version string empty") - - // ErrInvalidCharacters is returned when invalid characters are found as - // part of a version - ErrInvalidCharacters = errors.New("Invalid characters in version") - - // ErrSegmentStartsZero is returned when a version segment starts with 0. - // This is invalid in SemVer. - ErrSegmentStartsZero = errors.New("Version segment starts with 0") - - // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") - - // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") -) - -// semVerRegex is the regular expression used to parse a semantic version. 
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -// Version represents a single semantic version. -type Version struct { - major, minor, patch uint64 - pre string - metadata string - original string -} - -func init() { - versionRegex = regexp.MustCompile("^" + semVerRegex + "$") -} - -const ( - num string = "0123456789" - allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num -) - -// StrictNewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. Only parses valid semantic versions. -// Performs checking that can find errors within the version. -// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x -// releases of semver did, use the NewVersion() function. -func StrictNewVersion(v string) (*Version, error) { - // Parsing here does not use RegEx in order to increase performance and reduce - // allocations. - - if len(v) == 0 { - return nil, ErrEmptyString - } - - // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build - parts := strings.SplitN(v, ".", 3) - if len(parts) != 3 { - return nil, ErrInvalidSemVer - } - - sv := &Version{ - original: v, - } - - // check for prerelease or build metadata - var extra []string - if strings.ContainsAny(parts[2], "-+") { - // Start with the build metadata first as it needs to be on the right - extra = strings.SplitN(parts[2], "+", 2) - if len(extra) > 1 { - // build metadata found - sv.metadata = extra[1] - parts[2] = extra[0] - } - - extra = strings.SplitN(parts[2], "-", 2) - if len(extra) > 1 { - // prerelease found - sv.pre = extra[1] - parts[2] = extra[0] - } - } - - // Validate the number segments are valid. This includes only having positive - // numbers and no leading 0's. - for _, p := range parts { - if !containsOnly(p, num) { - return nil, ErrInvalidCharacters - } - - if len(p) > 1 && p[0] == '0' { - return nil, ErrSegmentStartsZero - } - } - - // Extract the major, minor, and patch elements onto the returned Version - var err error - sv.major, err = strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return nil, err - } - - sv.minor, err = strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return nil, err - } - - sv.patch, err = strconv.ParseUint(parts[2], 10, 64) - if err != nil { - return nil, err - } - - // No prerelease or build metadata found so returning now as a fastpath. - if sv.pre == "" && sv.metadata == "" { - return sv, nil - } - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - - return sv, nil -} - -// NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. If the version is SemVer-ish it -// attempts to convert it to SemVer. If you want to validate it was a strict -// semantic version at parse time see StrictNewVersion(). 
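A minimal illustration of the coercion contrast described in the comment above, using an arbitrary example input and only the two parse functions defined in this file:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// NewVersion coerces SemVer-ish input: the leading v and the missing
	// patch segment are tolerated, so "v1.2" parses as 1.2.0.
	v, err := semver.NewVersion("v1.2")
	fmt.Println(v, err) // 1.2.0 <nil>

	// StrictNewVersion accepts only full three-part versions per the
	// SemVer 2.0.0 spec, so the same input is rejected with an error.
	_, err = semver.StrictNewVersion("v1.2")
	fmt.Println(err != nil) // true
}
```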
-func NewVersion(v string) (*Version, error) { - m := versionRegex.FindStringSubmatch(v) - if m == nil { - return nil, ErrInvalidSemVer - } - - sv := &Version{ - metadata: m[8], - pre: m[5], - original: v, - } - - var err error - sv.major, err = strconv.ParseUint(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - - if m[2] != "" { - sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - } else { - sv.minor = 0 - } - - if m[3] != "" { - sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - } else { - sv.patch = 0 - } - - // Perform some basic due diligence on the extra parts to ensure they are - // valid. - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - - return sv, nil -} - -// New creates a new instance of Version with each of the parts passed in as -// arguments instead of parsing a version string. -func New(major, minor, patch uint64, pre, metadata string) *Version { - v := Version{ - major: major, - minor: minor, - patch: patch, - pre: pre, - metadata: metadata, - original: "", - } - - v.original = v.String() - - return &v -} - -// MustParse parses a given version and panics on error. -func MustParse(v string) *Version { - sv, err := NewVersion(v) - if err != nil { - panic(err) - } - return sv -} - -// String converts a Version object to a string. -// Note, if the original version contained a leading v this version will not. -// See the Original() method to retrieve the original value. Semantic Versions -// don't contain a leading v per the spec. Instead it's optional on -// implementation. -func (v Version) String() string { - var buf bytes.Buffer - - fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original value passed in to be parsed. -func (v *Version) Original() string { - return v.original -} - -// Major returns the major version. -func (v Version) Major() uint64 { - return v.major -} - -// Minor returns the minor version. -func (v Version) Minor() uint64 { - return v.minor -} - -// Patch returns the patch version. -func (v Version) Patch() uint64 { - return v.patch -} - -// Prerelease returns the pre-release version. -func (v Version) Prerelease() string { - return v.pre -} - -// Metadata returns the metadata on the version. -func (v Version) Metadata() string { - return v.metadata -} - -// originalVPrefix returns the original 'v' prefix if any. -func (v Version) originalVPrefix() string { - // Note, only lowercase v is supported as a prefix by the parser. - if v.original != "" && v.original[:1] == "v" { - return v.original[:1] - } - return "" -} - -// IncPatch produces the next patch version. -// If the current version does not have prerelease/metadata information, -// it unsets metadata and prerelease values, increments patch number. 
-// If the current version has any of prerelease or metadata information, -// it unsets both values and keeps current patch value -func (v Version) IncPatch() Version { - vNext := v - // according to http://semver.org/#spec-item-9 - // Pre-release versions have a lower precedence than the associated normal version. - // according to http://semver.org/#spec-item-10 - // Build metadata SHOULD be ignored when determining version precedence. - if v.pre != "" { - vNext.metadata = "" - vNext.pre = "" - } else { - vNext.metadata = "" - vNext.pre = "" - vNext.patch = v.patch + 1 - } - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMinor produces the next minor version. -// Sets patch to 0. -// Increments minor number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMinor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = v.minor + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMajor produces the next major version. -// Sets patch to 0. -// Sets minor to 0. -// Increments major number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMajor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = 0 - vNext.major = v.major + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// SetPrerelease defines the prerelease value. -// Value must not include the required 'hyphen' prefix. -func (v Version) SetPrerelease(prerelease string) (Version, error) { - vNext := v - if len(prerelease) > 0 { - if err := validatePrerelease(prerelease); err != nil { - return vNext, err - } - } - vNext.pre = prerelease - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// SetMetadata defines metadata value. -// Value must not include the required 'plus' prefix. -func (v Version) SetMetadata(metadata string) (Version, error) { - vNext := v - if len(metadata) > 0 { - if err := validateMetadata(metadata); err != nil { - return vNext, err - } - } - vNext.metadata = metadata - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// LessThan tests if one version is less than another one. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// GreaterThan tests if one version is greater than another one. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// Equal tests if two versions are equal to each other. -// Note, versions can be equal with different metadata since metadata -// is not considered part of the comparable version. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// Compare compares this version to another one. It returns -1, 0, or 1 if -// the version smaller, equal, or larger than the other version. -// -// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is -// lower than the version without a prerelease. Compare always takes into account -// prereleases. If you want to work with ranges using typical range syntaxes that -// skip prereleases if the range is not looking for them use constraints. -func (v *Version) Compare(o *Version) int { - // Compare the major, minor, and patch version for differences. If a - // difference is found return the comparison. 
- if d := compareSegment(v.Major(), o.Major()); d != 0 { - return d - } - if d := compareSegment(v.Minor(), o.Minor()); d != 0 { - return d - } - if d := compareSegment(v.Patch(), o.Patch()); d != 0 { - return d - } - - // At this point the major, minor, and patch versions are the same. - ps := v.pre - po := o.Prerelease() - - if ps == "" && po == "" { - return 0 - } - if ps == "" { - return 1 - } - if po == "" { - return -1 - } - - return comparePrerelease(ps, po) -} - -// UnmarshalJSON implements JSON.Unmarshaler interface. -func (v *Version) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - return nil -} - -// MarshalJSON implements JSON.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (v *Version) UnmarshalText(text []byte) error { - temp, err := NewVersion(string(text)) - if err != nil { - return err - } - - *v = *temp - - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (v Version) MarshalText() ([]byte, error) { - return []byte(v.String()), nil -} - -// Scan implements the SQL.Scanner interface. -func (v *Version) Scan(value interface{}) error { - var s string - s, _ = value.(string) - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - return nil -} - -// Value implements the Driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} - -func compareSegment(v, o uint64) int { - if v < o { - return -1 - } - if v > o { - return 1 - } - - return 0 -} - -func comparePrerelease(v, o string) int { - // split the prelease versions by their part. The separator, per the spec, - // is a . - sparts := strings.Split(v, ".") - oparts := strings.Split(o, ".") - - // Find the longer length of the parts to know how many loop iterations to - // go through. - slen := len(sparts) - olen := len(oparts) - - l := slen - if olen > slen { - l = olen - } - - // Iterate over each part of the prereleases to compare the differences. - for i := 0; i < l; i++ { - // Since the lentgh of the parts can be different we need to create - // a placeholder. This is to avoid out of bounds issues. - stemp := "" - if i < slen { - stemp = sparts[i] - } - - otemp := "" - if i < olen { - otemp = oparts[i] - } - - d := comparePrePart(stemp, otemp) - if d != 0 { - return d - } - } - - // Reaching here means two versions are of equal value but have different - // metadata (the part following a +). They are not identical in string form - // but the version comparison finds them to be equal. - return 0 -} - -func comparePrePart(s, o string) int { - // Fastpath if they are equal - if s == o { - return 0 - } - - // When s or o are empty we can use the other in an attempt to determine - // the response. - if s == "" { - if o != "" { - return -1 - } - return 1 - } - - if o == "" { - if s != "" { - return 1 - } - return -1 - } - - // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. 
According - // to the semver spec, numbers are always positive. If there is a - at the - // start like -99 this is to be evaluated as an alphanum. numbers always - // have precedence over alphanum. Parsing as Uints because negative numbers - // are ignored. - - oi, n1 := strconv.ParseUint(o, 10, 64) - si, n2 := strconv.ParseUint(s, 10, 64) - - // The case where both are strings compare the strings - if n1 != nil && n2 != nil { - if s > o { - return 1 - } - return -1 - } else if n1 != nil { - // o is a string and s is a number - return -1 - } else if n2 != nil { - // s is a string and o is a number - return 1 - } - // Both are numbers - if si > oi { - return 1 - } - return -1 -} - -// Like strings.ContainsAny but does an only instead of any. -func containsOnly(s string, comp string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(comp, r) - }) == -1 -} - -// From the spec, "Identifiers MUST comprise only -// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. -// Numeric identifiers MUST NOT include leading zeroes.". These segments can -// be dot separated. -func validatePrerelease(p string) error { - eparts := strings.Split(p, ".") - for _, p := range eparts { - if containsOnly(p, num) { - if len(p) > 1 && p[0] == '0' { - return ErrSegmentStartsZero - } - } else if !containsOnly(p, allowed) { - return ErrInvalidPrerelease - } - } - - return nil -} - -// From the spec, "Build metadata MAY be denoted by -// appending a plus sign and a series of dot separated identifiers immediately -// following the patch or pre-release version. Identifiers MUST comprise only -// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." -func validateMetadata(m string) error { - eparts := strings.Split(m, ".") - for _, p := range eparts { - if !containsOnly(p, allowed) { - return ErrInvalidMetadata - } - } - return nil -} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore deleted file mode 100644 index 5e3002f8..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor/ -/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md deleted file mode 100644 index 2ce45dd4..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md +++ /dev/null @@ -1,383 +0,0 @@ -# Changelog - -## Release 3.2.3 (2022-11-29) - -### Changed - -- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) -- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) -- #353: Updated masterminds/semver which included bug fixes -- #354: Updated golang.org/x/crypto which included bug fixes - -## Release 3.2.2 (2021-02-04) - -This is a re-release of 3.2.1 to satisfy something with the Go module system. - -## Release 3.2.1 (2021-02-04) - -### Changed - -- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) - -## Release 3.2.0 (2020-12-14) - -### Added - -- #211: Added randInt function (thanks @kochurovro) -- #223: Added fromJson and mustFromJson functions (thanks @mholt) -- #242: Added a bcrypt function (thanks @robbiet480) -- #253: Added randBytes function (thanks @MikaelSmith) -- #254: Added dig function for dicts (thanks @nyarly) -- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) -- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) -- #268: Added and and all functions for testing conditions (thanks @phuslu) -- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf - (thanks @andrewmostello) -- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) -- #270: Extend certificate functions to handle non-RSA keys + add support for - ed25519 keys (thanks @misberner) - -### Changed - -- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer -- Using semver 3.1.1 and mergo 0.3.11 - -### Fixed - -- #249: Fix htmlDateInZone example (thanks @spawnia) - -NOTE: The dependency github.com/imdario/mergo reverted the breaking change in -0.3.9 via 0.3.10 release. - -## Release 3.1.0 (2020-04-16) - -NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 -that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. - -### Added - -- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) -- #224: Added duration filter (thanks @frebib) -- #205: Added `seq` function (thanks @thadc23) - -### Changed - -- #203: Unlambda functions with correct signature (thanks @muesli) -- #236: Updated the license formatting for GitHub display purposes -- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 - as it causes a breaking change for sprig. That issue is tracked at - https://github.com/imdario/mergo/issues/139 - -### Fixed - -- #229: Fix `seq` example in docs (thanks @kalmant) - -## Release 3.0.2 (2019-12-13) - -### Fixed - -- #220: Updating to semver v3.0.3 to fix issue with <= ranges -- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) - -## Release 3.0.1 (2019-12-08) - -### Fixed - -- #212: Updated semver fixing broken constraint checking with ^0.0 - -## Release 3.0.0 (2019-10-02) - -### Added - -- #187: Added durationRound function (thanks @yjp20) -- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) -- #193: Added toRawJson support (thanks @Dean-Coakley) -- #197: Added get support to dicts (thanks @Dean-Coakley) - -### Changed - -- #186: Moving dependency management to Go modules -- #186: Updated semver to v3. This has changes in the way ^ is handled -- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy -- #196: trunc now supports negative values (thanks @Dean-Coakley) - -## Release 2.22.0 (2019-10-02) - -### Added - -- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) -- #195: Added deepCopy function for use with dicts - -### Changed - -- Updated merge and mergeOverwrite documentation to explain copying and how to - use deepCopy with it - -## Release 2.21.0 (2019-09-18) - -### Added - -- #122: Added encryptAES/decryptAES functions (thanks @n0madic) -- #128: Added toDecimal support (thanks @Dean-Coakley) -- #169: Added list contcat (thanks @astorath) -- #174: Added deepEqual function (thanks @bonifaido) -- #170: Added url parse and join functions (thanks @astorath) - -### Changed - -- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify - -### Fixed - -- #172: Fix semver wildcard example (thanks @piepmatz) -- #175: Fix dateInZone doc example (thanks @s3than) - -## Release 2.20.0 (2019-06-18) - -### Added - -- #164: Adding function to get unix epoch for a time (@mattfarina) -- #166: Adding tests for date_in_zone (@mattfarina) - -### Changed - -- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) -- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) -- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) - -### Fixed - -## Release 2.19.0 (2019-03-02) - -IMPORTANT: This release reverts a change from 2.18.0 - -In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. - -We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. - -### Changed - -- Fix substr panic 35fb796 (Alexey igrychev) -- Remove extra period 1eb7729 (Matthew Lorimor) -- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) -- README edits/fixes/suggestions 08fe136 (Lauri Apple) - - -## Release 2.18.0 (2019-02-12) - -### Added - -- Added mergeOverwrite function -- cryptographic functions that use secure random (see fe1de12) - -### Changed - -- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) -- Handle has for nil list 9c10885 (Daniel Cohen) -- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) -- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) -- Replace outdated goutils imports 01893d2 (Matthew Lorimor) -- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) -- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) - -### Fixed - -- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) -- Fix substr var names and comments d581f80 (Dean Coakley) -- Fix substr documentation 2737203 (Dean Coakley) - -## Release 2.17.1 (2019-01-03) - -### Fixed - -The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
- -## Release 2.17.0 (2019-01-03) - -### Added - -- adds alder32sum function and test 6908fc2 (marshallford) -- Added kebabcase function ca331a1 (Ilyes512) - -### Changed - -- Update goutils to 1.1.0 4e1125d (Matt Butcher) - -### Fixed - -- Fix 'has' documentation e3f2a85 (dean-coakley) -- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) -- fixes spelling errors... not sure how that happened 4cf188a (marshallford) - -## Release 2.16.0 (2018-08-13) - -### Added - -- add splitn function fccb0b0 (Helgi Þorbjörnsson) -- Add slice func df28ca7 (gongdo) -- Generate serial number a3bdffd (Cody Coons) -- Extract values of dict with values function df39312 (Lawrence Jones) - -### Changed - -- Modify panic message for list.slice ae38335 (gongdo) -- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) -- Remove duplicated documentation 1d97af1 (Matthew Fisher) -- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) - -### Fixed - -- Fix file permissions c5f40b5 (gongdo) -- Fix example for buildCustomCert 7779e0d (Tin Lam) - -## Release 2.15.0 (2018-04-02) - -### Added - -- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -- #66: Add ternary function (thanks @binoculars) -- #67: Allow keys function to take multiple dicts (thanks @binoculars) -- #89: Added sha1sum to crypto function (thanks @benkeil) -- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -- #92: Add travis testing for go 1.10 -- #93: Adding appveyor config for windows testing - -### Changed - -- #90: Updating to more recent dependencies -- #73: replace satori/go.uuid with google/uuid (thanks @petterw) - -### Fixed - -- #76: Fixed documentation typos (thanks @Thiht) -- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older - -## Release 2.14.1 (2017-12-01) - -### Fixed - -- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -- #61: Removing line with {{ due to blocking github pages genertion -- #64: Update the list functions to handle int, string, and other slices for compatibility - -## Release 2.14.0 (2017-10-06) - -This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
- -- `genCA` generates an SSL Certificate Authority -- `genSelfSignedCert` generates an SSL self-signed certificate -- `genSignedCert` generates an SSL certificate and key based on a given CA - -## Release 2.13.0 (2017-09-18) - -This release adds new functions, including: - -- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions -- `floor`, `ceil`, and `round` math functions -- `toDate` converts a string to a date -- `nindent` is just like `indent` but also prepends a new line -- `ago` returns the time from `time.Now` - -### Added - -- #40: Added basic regex functionality (thanks @alanquillin) -- #41: Added ceil floor and round functions (thanks @alanquillin) -- #48: Added toDate function (thanks @andreynering) -- #50: Added nindent function (thanks @binoculars) -- #46: Added ago function (thanks @slayer) - -### Changed - -- #51: Updated godocs to include new string functions (thanks @curtisallen) -- #49: Added ability to merge multiple dicts (thanks @binoculars) - -## Release 2.12.0 (2017-05-17) - -- `snakecase`, `camelcase`, and `shuffle` are three new string functions -- `fail` allows you to bail out of a template render when conditions are not met - -## Release 2.11.0 (2017-05-02) - -- Added `toJson` and `toPrettyJson` -- Added `merge` -- Refactored documentation - -## Release 2.10.0 (2017-03-15) - -- Added `semver` and `semverCompare` for Semantic Versions -- `list` replaces `tuple` -- Fixed issue with `join` -- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` - -## Release 2.9.0 (2017-02-23) - -- Added `splitList` to split a list -- Added crypto functions of `genPrivateKey` and `derivePassword` - -## Release 2.8.0 (2016-12-21) - -- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) -- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) - -## Release 2.7.0 (2016-12-01) - -- Added `sha256sum` to generate a hash of an input -- Added functions to convert a numeric or string to `int`, `int64`, `float64` - -## Release 2.6.0 (2016-10-03) - -- Added a `uuidv4` template function for generating UUIDs inside of a template. - -## Release 2.5.0 (2016-08-19) - -- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions -- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) -- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 - -## Release 2.4.0 (2016-08-16) - -- Adds two functions: `until` and `untilStep` - -## Release 2.3.0 (2016-06-21) - -- cat: Concatenate strings with whitespace separators. -- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" -- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" -- indent: Indent blocks of text in a way that is sensitive to "\n" characters. - -## Release 2.2.0 (2016-04-21) - -- Added a `genPrivateKey` function (Thanks @bacongobbler) - -## Release 2.1.0 (2016-03-30) - -- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. -- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
- -## Release 2.0.0 (2016-03-29) - -Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. - -- `min` complements `max` (formerly `biggest`) -- `empty` indicates that a value is the empty value for its type -- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` -- Date formatters have been added for HTML dates (as used in `date` input fields) -- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). - -## Release 1.2.0 (2016-02-01) - -- Added quote and squote -- Added b32enc and b32dec -- add now takes varargs -- biggest now takes varargs - -## Release 1.1.0 (2015-12-29) - -- Added #4: Added contains function. strings.Contains, but with the arguments - switched to simplify common pipelines. (thanks krancour) -- Added Travis-CI testing support - -## Release 1.0.0 (2015-12-23) - -- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt deleted file mode 100644 index f311b1ea..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2013-2020 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile deleted file mode 100644 index 78d409cd..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: test -test: - @echo "==> Running tests" - GO111MODULE=on go test -v - -.PHONY: test-cover -test-cover: - @echo "==> Running Tests with coverage" - GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md deleted file mode 100644 index 3e22c60e..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Sprig: Template functions for Go templates - -[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) -[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) -[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) - -The Go language comes with a [built-in template -language](http://golang.org/pkg/text/template/), but not -very many template functions. Sprig is a library that provides more than 100 commonly -used template functions. - -It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and in various -JavaScript libraries, such as [underscore.js](http://underscorejs.org/). - -## IMPORTANT NOTES - -Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In -its v0.3.9 release, there was a behavior change that impacts merging template -functions in sprig. It is currently recommended to use v0.3.10 or later of that package. -Using v0.3.9 will cause sprig tests to fail. - -## Package Versions - -There are two active major versions of the `sprig` package. - -* v3 is currently stable release series on the `master` branch. The Go API should - remain compatible with v2, the current stable version. Behavior change behind - some functions is the reason for the new major version. -* v2 is the previous stable release series. It has been more than three years since - the initial release of v2. You can read the documentation and see the code - on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. - Bug fixes to this major version will continue for some time. - -## Usage - -**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for -detailed instructions and code snippets for the >100 template functions available. - -**Go developers**: If you'd like to include Sprig as a library in your program, -our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). - -For standard usage, read on. - -### Load the Sprig library - -To load the Sprig `FuncMap`: - -```go - -import ( - "github.com/Masterminds/sprig/v3" - "html/template" -) - -// This example illustrates that the FuncMap *must* be set before the -// templates themselves are loaded. -tpl := template.Must( - template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") -) - - -``` - -### Calling the functions inside of templates - -By convention, all functions are lowercase. This seems to follow the Go -idiom for template functions (as opposed to template methods, which are -TitleCase). For example, this: - -``` -{{ "hello!" | upper | repeat 5 }} -``` - -produces this: - -``` -HELLO!HELLO!HELLO!HELLO!HELLO! -``` - -## Principles Driving Our Function Selection - -We followed these principles to decide which functions to add and how to implement them: - -- Use template functions to build layout. 
The following - types of operations are within the domain of template functions: - - Formatting - - Layout - - Simple type conversions - - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) -- Template functions should not return errors unless there is no way to print - a sensible value. For example, converting a string to an integer should not - produce an error if conversion fails. Instead, it should display a default - value. -- Simple math is necessary for grid layouts, pagers, and so on. Complex math - (anything other than arithmetic) should be done outside of templates. -- Template functions only deal with the data passed into them. They never retrieve - data from a source. -- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go deleted file mode 100644 index 13a5cd55..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/crypto.go +++ /dev/null @@ -1,653 +0,0 @@ -package sprig - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/hmac" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "hash/adler32" - "io" - "math/big" - "net" - "time" - - "strings" - - "github.com/google/uuid" - bcrypt_lib "golang.org/x/crypto/bcrypt" - "golang.org/x/crypto/scrypt" -) - -func sha256sum(input string) string { - hash := sha256.Sum256([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func sha1sum(input string) string { - hash := sha1.Sum([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func adler32sum(input string) string { - hash := adler32.Checksum([]byte(input)) - return fmt.Sprintf("%d", hash) -} - -func bcrypt(input string) string { - hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) - if err != nil { - return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) - } - - return string(hash) -} - -func htpasswd(username string, password string) string { - if strings.Contains(username, ":") { - return fmt.Sprintf("invalid username: %s", username) - } - return fmt.Sprintf("%s:%s", username, bcrypt(password)) -} - -func randBytes(count int) (string, error) { - buf := make([]byte, count) - if _, err := rand.Read(buf); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(buf), nil -} - -// uuidv4 provides a safe and secure UUID v4 implementation -func uuidv4() string { - return uuid.New().String() -} - -var masterPasswordSeed = "com.lyndir.masterpassword" - -var passwordTypeTemplates = map[string][][]byte{ - "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, - "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), - []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), - []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), - []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), - []byte("CvccCvcvCvccno")}, - "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, - "short": {[]byte("Cvcn")}, - 
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var templateCharacters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, passwordType, password, user, site string) string { - var templates = passwordTypeTemplates[passwordType] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", passwordType) - } - - var buffer bytes.Buffer - buffer.WriteString(masterPasswordSeed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(masterPasswordSeed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - passChars := templateCharacters[element] - passChar := passChars[int(seed[i+1])%len(passChars)] - buffer.WriteByte(passChar) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -// DSAKeyFormat stores the format for DSA keys. 
-// Used by pemBlockForKey -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - // attempt PKCS#8 format for all other keys - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil - } - return &pem.Block{Type: "PRIVATE KEY", Bytes: b} - } -} - -func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { - block, _ := pem.Decode([]byte(pemBlock)) - if block == nil { - return nil, errors.New("no PEM data in input") - } - - if block.Type == "PRIVATE KEY" { - priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) - } - return priv, nil - } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { - return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) - } - - switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" - case "RSA": - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) - } - return priv, nil - case "EC": - priv, err := x509.ParseECPrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) - } - return priv, nil - case "DSA": - var k DSAKeyFormat - _, err := asn1.Unmarshal(block.Bytes, &k) - if err != nil { - return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, Q: k.Q, G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - return priv, nil - default: - return nil, fmt.Errorf("invalid private key type %s", block.Type) - } -} - -func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { - switch k := priv.(type) { - case interface{ Public() crypto.PublicKey }: - return k.Public(), nil - case *dsa.PrivateKey: - return &k.PublicKey, nil - default: - return nil, fmt.Errorf("unable to get public key for type %T", priv) - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - _, err = parsePrivateKeyPEM(string(key)) - if err != nil { - return crt, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - priv, err := 
rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithPEMKey( - cn string, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithKeyInternal( - cn string, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - - return ca, err -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - - return cert, err -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != 
nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - signerKey, err := parsePrivateKeyPEM(ca.Key) - if err != nil { - return cert, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - - return cert, err -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey crypto.PrivateKey, - parent *x509.Certificate, - signingKey crypto.PrivateKey, -) (string, string, error) { - signeePubKey, err := getPublicKey(signeeKey) - if err != nil { - return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) - } - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - signeePubKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - pemBlockForKey(signeeKey), - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return certBuffer.String(), keyBuffer.String(), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{}) ([]net.IP, error) { - if ips == nil { - return []net.IP{}, nil - } - var ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} - -func encryptAES(password string, plaintext string) (string, error) { - if plaintext == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, 
[]byte(password)) - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - content := []byte(plaintext) - blockSize := block.BlockSize() - padding := blockSize - len(content)%blockSize - padtext := bytes.Repeat([]byte{byte(padding)}, padding) - content = append(content, padtext...) - - ciphertext := make([]byte, aes.BlockSize+len(content)) - - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return "", err - } - - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(ciphertext[aes.BlockSize:], content) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func decryptAES(password string, crypt64 string) (string, error) { - if crypt64 == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, []byte(password)) - - crypt, err := base64.StdEncoding.DecodeString(crypt64) - if err != nil { - return "", err - } - - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - iv := crypt[:aes.BlockSize] - crypt = crypt[aes.BlockSize:] - decrypted := make([]byte, len(crypt)) - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(decrypted, crypt) - - return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil -} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go deleted file mode 100644 index ed022dda..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/date.go +++ /dev/null @@ -1,152 +0,0 @@ -package sprig - -import ( - "strconv" - "time" -) - -// Given a format and a date, format the date string. -// -// Date can be a `time.Time` or an `int, int32, int64`. -// In the later case, it is treated as seconds since UNIX -// epoch. -func date(fmt string, date interface{}) string { - return dateInZone(fmt, date, "Local") -} - -func htmlDate(date interface{}) string { - return dateInZone("2006-01-02", date, "Local") -} - -func htmlDateInZone(date interface{}, zone string) string { - return dateInZone("2006-01-02", date, zone) -} - -func dateInZone(fmt string, date interface{}, zone string) string { - var t time.Time - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case *time.Time: - t = *date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - case int32: - t = time.Unix(int64(date), 0) - } - - loc, err := time.LoadLocation(zone) - if err != nil { - loc, _ = time.LoadLocation("UTC") - } - - return t.In(loc).Format(fmt) -} - -func dateModify(fmt string, date time.Time) time.Time { - d, err := time.ParseDuration(fmt) - if err != nil { - return date - } - return date.Add(d) -} - -func mustDateModify(fmt string, date time.Time) (time.Time, error) { - d, err := time.ParseDuration(fmt) - if err != nil { - return time.Time{}, err - } - return date.Add(d), nil -} - -func dateAgo(date interface{}) string { - var t time.Time - - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - } - // Drop resolution to seconds - duration := time.Since(t).Round(time.Second) - return duration.String() -} - -func duration(sec interface{}) string { - var n int64 - switch value := sec.(type) { - default: - n = 0 - case string: - n, _ = strconv.ParseInt(value, 10, 64) - case int64: - n = value - } - return (time.Duration(n) * time.Second).String() -} - -func durationRound(duration interface{}) string { - var d time.Duration - switch duration := 
duration.(type) { - default: - d = 0 - case string: - d, _ = time.ParseDuration(duration) - case int64: - d = time.Duration(duration) - case time.Time: - d = time.Since(duration) - } - - u := uint64(d) - neg := d < 0 - if neg { - u = -u - } - - var ( - year = uint64(time.Hour) * 24 * 365 - month = uint64(time.Hour) * 24 * 30 - day = uint64(time.Hour) * 24 - hour = uint64(time.Hour) - minute = uint64(time.Minute) - second = uint64(time.Second) - ) - switch { - case u > year: - return strconv.FormatUint(u/year, 10) + "y" - case u > month: - return strconv.FormatUint(u/month, 10) + "mo" - case u > day: - return strconv.FormatUint(u/day, 10) + "d" - case u > hour: - return strconv.FormatUint(u/hour, 10) + "h" - case u > minute: - return strconv.FormatUint(u/minute, 10) + "m" - case u > second: - return strconv.FormatUint(u/second, 10) + "s" - } - return "0s" -} - -func toDate(fmt, str string) time.Time { - t, _ := time.ParseInLocation(fmt, str, time.Local) - return t -} - -func mustToDate(fmt, str string) (time.Time, error) { - return time.ParseInLocation(fmt, str, time.Local) -} - -func unixEpoch(date time.Time) string { - return strconv.FormatInt(date.Unix(), 10) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go deleted file mode 100644 index b9f97966..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/defaults.go +++ /dev/null @@ -1,163 +0,0 @@ -package sprig - -import ( - "bytes" - "encoding/json" - "math/rand" - "reflect" - "strings" - "time" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// dfault checks whether `given` is set, and returns default if not set. -// -// This returns `d` if `given` appears not to be set, and `given` otherwise. -// -// For numeric types 0 is unset. -// For strings, maps, arrays, and slices, len() = 0 is considered unset. -// For bool, false is unset. -// Structs are never considered unset. -// -// For everything else, including pointers, a nil value is unset. -func dfault(d interface{}, given ...interface{}) interface{} { - - if empty(given) || empty(given[0]) { - return d - } - return given[0] -} - -// empty returns true if the given value has the zero value for its type. -func empty(given interface{}) bool { - g := reflect.ValueOf(given) - if !g.IsValid() { - return true - } - - // Basically adapted from text/template.isTrue - switch g.Kind() { - default: - return g.IsNil() - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return g.Len() == 0 - case reflect.Bool: - return !g.Bool() - case reflect.Complex64, reflect.Complex128: - return g.Complex() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return g.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return g.Uint() == 0 - case reflect.Float32, reflect.Float64: - return g.Float() == 0 - case reflect.Struct: - return false - } -} - -// coalesce returns the first non-empty value. -func coalesce(v ...interface{}) interface{} { - for _, val := range v { - if !empty(val) { - return val - } - } - return nil -} - -// all returns true if empty(x) is false for all values x in the list. -// If the list is empty, return true. -func all(v ...interface{}) bool { - for _, val := range v { - if empty(val) { - return false - } - } - return true -} - -// any returns true if empty(x) is false for any x in the list. -// If the list is empty, return false. 
-func any(v ...interface{}) bool { - for _, val := range v { - if !empty(val) { - return true - } - } - return false -} - -// fromJson decodes JSON into a structured value, ignoring errors. -func fromJson(v string) interface{} { - output, _ := mustFromJson(v) - return output -} - -// mustFromJson decodes JSON into a structured value, returning errors. -func mustFromJson(v string) (interface{}, error) { - var output interface{} - err := json.Unmarshal([]byte(v), &output) - return output, err -} - -// toJson encodes an item into a JSON string -func toJson(v interface{}) string { - output, _ := json.Marshal(v) - return string(output) -} - -func mustToJson(v interface{}) (string, error) { - output, err := json.Marshal(v) - if err != nil { - return "", err - } - return string(output), nil -} - -// toPrettyJson encodes an item into a pretty (indented) JSON string -func toPrettyJson(v interface{}) string { - output, _ := json.MarshalIndent(v, "", " ") - return string(output) -} - -func mustToPrettyJson(v interface{}) (string, error) { - output, err := json.MarshalIndent(v, "", " ") - if err != nil { - return "", err - } - return string(output), nil -} - -// toRawJson encodes an item into a JSON string with no escaping of HTML characters. -func toRawJson(v interface{}) string { - output, err := mustToRawJson(v) - if err != nil { - panic(err) - } - return string(output) -} - -// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. -func mustToRawJson(v interface{}) (string, error) { - buf := new(bytes.Buffer) - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(&v) - if err != nil { - return "", err - } - return strings.TrimSuffix(buf.String(), "\n"), nil -} - -// ternary returns the first value if the last value is true, otherwise returns the second value. 
-func ternary(vt interface{}, vf interface{}, v bool) interface{} { - if v { - return vt - } - - return vf -} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go deleted file mode 100644 index ade88969..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/dict.go +++ /dev/null @@ -1,174 +0,0 @@ -package sprig - -import ( - "github.com/imdario/mergo" - "github.com/mitchellh/copystructure" -) - -func get(d map[string]interface{}, key string) interface{} { - if val, ok := d[key]; ok { - return val - } - return "" -} - -func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { - d[key] = value - return d -} - -func unset(d map[string]interface{}, key string) map[string]interface{} { - delete(d, key) - return d -} - -func hasKey(d map[string]interface{}, key string) bool { - _, ok := d[key] - return ok -} - -func pluck(key string, d ...map[string]interface{}) []interface{} { - res := []interface{}{} - for _, dict := range d { - if val, ok := dict[key]; ok { - res = append(res, val) - } - } - return res -} - -func keys(dicts ...map[string]interface{}) []string { - k := []string{} - for _, dict := range dicts { - for key := range dict { - k = append(k, key) - } - } - return k -} - -func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - for _, k := range keys { - if v, ok := dict[k]; ok { - res[k] = v - } - } - return res -} - -func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - - omit := make(map[string]bool, len(keys)) - for _, k := range keys { - omit[k] = true - } - - for k, v := range dict { - if _, ok := omit[k]; !ok { - res[k] = v - } - } - return res -} - -func dict(v ...interface{}) map[string]interface{} { - dict := map[string]interface{}{} - lenv := len(v) - for i := 0; i < lenv; i += 2 { - key := strval(v[i]) - if i+1 >= lenv { - dict[key] = "" - continue - } - dict[key] = v[i+1] - } - return dict -} - -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // Swallow errors inside of a template. 
- return "" - } - } - return dst -} - -func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func values(dict map[string]interface{}) []interface{} { - values := []interface{}{} - for _, value := range dict { - values = append(values, value) - } - - return values -} - -func deepCopy(i interface{}) interface{} { - c, err := mustDeepCopy(i) - if err != nil { - panic("deepCopy error: " + err.Error()) - } - - return c -} - -func mustDeepCopy(i interface{}) (interface{}, error) { - return copystructure.Copy(i) -} - -func dig(ps ...interface{}) (interface{}, error) { - if len(ps) < 3 { - panic("dig needs at least three arguments") - } - dict := ps[len(ps)-1].(map[string]interface{}) - def := ps[len(ps)-2] - ks := make([]string, len(ps)-2) - for i := 0; i < len(ks); i++ { - ks[i] = ps[i].(string) - } - - return digFromDict(dict, def, ks) -} - -func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { - k, ns := ks[0], ks[1:len(ks)] - step, has := dict[k] - if !has { - return d, nil - } - if len(ns) == 0 { - return step, nil - } - return digFromDict(step.(map[string]interface{}), d, ns) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go deleted file mode 100644 index aabb9d44..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package sprig provides template functions for Go. - -This package contains a number of utility functions for working with data -inside of Go `html/template` and `text/template` files. - -To add these functions, use the `template.Funcs()` method: - - t := templates.New("foo").Funcs(sprig.FuncMap()) - -Note that you should add the function map before you parse any template files. - - In several cases, Sprig reverses the order of arguments from the way they - appear in the standard library. This is to make it easier to pipe - arguments into functions. - -See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. -*/ -package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go deleted file mode 100644 index 57fcec1d..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/functions.go +++ /dev/null @@ -1,382 +0,0 @@ -package sprig - -import ( - "errors" - "html/template" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - ttemplate "text/template" - "time" - - util "github.com/Masterminds/goutils" - "github.com/huandu/xstrings" - "github.com/shopspring/decimal" -) - -// FuncMap produces the function map. -// -// Use this to pass the functions into the template engine: -// -// tpl := template.New("foo").Funcs(sprig.FuncMap())) -// -func FuncMap() template.FuncMap { - return HtmlFuncMap() -} - -// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -func HermeticTxtFuncMap() ttemplate.FuncMap { - r := TxtFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
-func HermeticHtmlFuncMap() template.FuncMap { - r := HtmlFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// TxtFuncMap returns a 'text/template'.FuncMap -func TxtFuncMap() ttemplate.FuncMap { - return ttemplate.FuncMap(GenericFuncMap()) -} - -// HtmlFuncMap returns an 'html/template'.Funcmap -func HtmlFuncMap() template.FuncMap { - return template.FuncMap(GenericFuncMap()) -} - -// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -func GenericFuncMap() map[string]interface{} { - gfm := make(map[string]interface{}, len(genericMap)) - for k, v := range genericMap { - gfm[k] = v - } - return gfm -} - -// These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environment or global state. -var nonhermeticFunctions = []string{ - // Date functions - "date", - "date_in_zone", - "date_modify", - "now", - "htmlDate", - "htmlDateInZone", - "dateInZone", - "dateModify", - - // Strings - "randAlphaNum", - "randAlpha", - "randAscii", - "randNumeric", - "randBytes", - "uuidv4", - - // OS - "env", - "expandenv", - - // Network - "getHostByName", -} - -var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - - // Date functions - "ago": dateAgo, - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "dateInZone": dateInZone, - "dateModify": dateModify, - "duration": duration, - "durationRound": durationRound, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "must_date_modify": mustDateModify, - "mustDateModify": mustDateModify, - "mustToDate": mustToDate, - "now": time.Now, - "toDate": toDate, - "unixEpoch": unixEpoch, - - // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, - // Switch order so that "foo" | repeat 5 - "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, - // Deprecated: Use trimAll. 
- "trimall": func(a, b string) string { return strings.Trim(b, a) }, - // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, - // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "adler32sum": adler32sum, - "toString": strval, - - // Wrap Atoi to stop errors. - "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, - "seq": seq, - "toDecimal": toDecimal, - - //"gt": func(a, b int) bool {return a > b}, - //"gte": func(a, b int) bool {return a >= b}, - //"lt": func(a, b int) bool {return a < b}, - //"lte": func(a, b int) bool {return a <= b}, - - // split "/" foo/bar returns map[int]string{0: foo, 1: bar} - "split": split, - "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, - // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} - "splitn": splitn, - "toStrings": strslice, - - "until": until, - "untilStep": untilStep, - - // VERY basic arithmetic. 
- "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, - "add": func(i ...interface{}) int64 { - var a int64 = 0 - for _, b := range i { - a += toInt64(b) - } - return a - }, - "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, - "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, - "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, - "mul": func(a interface{}, v ...interface{}) int64 { - val := toInt64(a) - for _, b := range v { - val = val * toInt64(b) - } - return val - }, - "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, - "add1f": func(i interface{}) float64 { - return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "addf": func(i ...interface{}) float64 { - a := interface{}(float64(0)) - return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "subf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) - }, - "divf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) - }, - "mulf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) - }, - "biggest": max, - "max": max, - "min": min, - "maxf": maxf, - "minf": minf, - "ceil": ceil, - "floor": floor, - "round": round, - - // string slices. Note that we reverse the order b/c that's better - // for template processing. - "join": join, - "sortAlpha": sortAlpha, - - // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "all": all, - "any": any, - "compact": compact, - "mustCompact": mustCompact, - "fromJson": fromJson, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "toRawJson": toRawJson, - "mustFromJson": mustFromJson, - "mustToJson": mustToJson, - "mustToPrettyJson": mustToPrettyJson, - "mustToRawJson": mustToRawJson, - "ternary": ternary, - "deepCopy": deepCopy, - "mustDeepCopy": mustDeepCopy, - - // Reflection - "typeOf": typeOf, - "typeIs": typeIs, - "typeIsLike": typeIsLike, - "kindOf": kindOf, - "kindIs": kindIs, - "deepEqual": reflect.DeepEqual, - - // OS: - "env": os.Getenv, - "expandenv": os.ExpandEnv, - - // Network: - "getHostByName": getHostByName, - - // Paths: - "base": path.Base, - "dir": path.Dir, - "clean": path.Clean, - "ext": path.Ext, - "isAbs": path.IsAbs, - - // Filepaths: - "osBase": filepath.Base, - "osClean": filepath.Clean, - "osDir": filepath.Dir, - "osExt": filepath.Ext, - "osIsAbs": filepath.IsAbs, - - // Encoding: - "b64enc": base64encode, - "b64dec": base64decode, - "b32enc": base32encode, - "b32dec": base32decode, - - // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
- "list": list, - "dict": dict, - "get": get, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, - "mustMerge": mustMerge, - "mustMergeOverwrite": mustMergeOverwrite, - "values": values, - - "append": push, "push": push, - "mustAppend": mustPush, "mustPush": mustPush, - "prepend": prepend, - "mustPrepend": mustPrepend, - "first": first, - "mustFirst": mustFirst, - "rest": rest, - "mustRest": mustRest, - "last": last, - "mustLast": mustLast, - "initial": initial, - "mustInitial": mustInitial, - "reverse": reverse, - "mustReverse": mustReverse, - "uniq": uniq, - "mustUniq": mustUniq, - "without": without, - "mustWithout": mustWithout, - "has": has, - "mustHas": mustHas, - "slice": slice, - "mustSlice": mustSlice, - "concat": concat, - "dig": dig, - "chunk": chunk, - "mustChunk": mustChunk, - - // Crypto: - "bcrypt": bcrypt, - "htpasswd": htpasswd, - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genCAWithKey": generateCertificateAuthorityWithPEMKey, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, - "genSignedCert": generateSignedCertificate, - "genSignedCertWithKey": generateSignedCertificateWithPEMKey, - "encryptAES": encryptAES, - "decryptAES": decryptAES, - "randBytes": randBytes, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, - - // Flow Control: - "fail": func(msg string) (string, error) { return "", errors.New(msg) }, - - // Regex - "regexMatch": regexMatch, - "mustRegexMatch": mustRegexMatch, - "regexFindAll": regexFindAll, - "mustRegexFindAll": mustRegexFindAll, - "regexFind": regexFind, - "mustRegexFind": mustRegexFind, - "regexReplaceAll": regexReplaceAll, - "mustRegexReplaceAll": mustRegexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, - "regexSplit": regexSplit, - "mustRegexSplit": mustRegexSplit, - "regexQuoteMeta": regexQuoteMeta, - - // URLs: - "urlParse": urlParse, - "urlJoin": urlJoin, -} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go deleted file mode 100644 index ca0fbb78..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/list.go +++ /dev/null @@ -1,464 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// Reflection is used in these functions so that slices and arrays of strings, -// ints, and other types not implementing []interface{} can be worked with. -// For example, this is useful if you need to work on the output of regexs. 
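A small sketch of what that reflective handling buys in practice: the helpers defined below (`first`, `rest`, `uniq`, `has`) can be fed a plain `[]string` straight from template data rather than requiring a `[]interface{}`. Assuming sprig's `TxtFuncMap` is installed:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// .Words is a plain []string; the list helpers accept it because they
	// walk slices and arrays via reflect instead of asserting []interface{}.
	const src = `first={{ first .Words }} rest={{ rest .Words }} ` +
		`uniq={{ uniq .Words }} has-go={{ has "go" .Words }}`

	tpl := template.Must(template.New("lists").Funcs(sprig.TxtFuncMap()).Parse(src))
	// prints: first=go rest=[templates go sprig] uniq=[go templates sprig] has-go=true
	_ = tpl.Execute(os.Stdout, map[string]interface{}{
		"Words": []string{"go", "templates", "go", "sprig"},
	})
}
```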
- -func list(v ...interface{}) []interface{} { - return v -} - -func push(list interface{}, v interface{}) []interface{} { - l, err := mustPush(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPush(list interface{}, v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append(nl, v), nil - - default: - return nil, fmt.Errorf("Cannot push on type %s", tp) - } -} - -func prepend(list interface{}, v interface{}) []interface{} { - l, err := mustPrepend(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { - //return append([]interface{}{v}, list...) - - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append([]interface{}{v}, nl...), nil - - default: - return nil, fmt.Errorf("Cannot prepend on type %s", tp) - } -} - -func chunk(size int, list interface{}) [][]interface{} { - l, err := mustChunk(size, list) - if err != nil { - panic(err) - } - - return l -} - -func mustChunk(size int, list interface{}) ([][]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - - cs := int(math.Floor(float64(l-1)/float64(size)) + 1) - nl := make([][]interface{}, cs) - - for i := 0; i < cs; i++ { - clen := size - if i == cs-1 { - clen = int(math.Floor(math.Mod(float64(l), float64(size)))) - if clen == 0 { - clen = size - } - } - - nl[i] = make([]interface{}, clen) - - for j := 0; j < clen; j++ { - ix := i*size + j - nl[i][j] = l2.Index(ix).Interface() - } - } - - return nl, nil - - default: - return nil, fmt.Errorf("Cannot chunk type %s", tp) - } -} - -func last(list interface{}) interface{} { - l, err := mustLast(list) - if err != nil { - panic(err) - } - - return l -} - -func mustLast(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(l - 1).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find last on type %s", tp) - } -} - -func first(list interface{}) interface{} { - l, err := mustFirst(list) - if err != nil { - panic(err) - } - - return l -} - -func mustFirst(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(0).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find first on type %s", tp) - } -} - -func rest(list interface{}) []interface{} { - l, err := mustRest(list) - if err != nil { - panic(err) - } - - return l -} - -func mustRest(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 1; i < l; i++ { - nl[i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) - } -} - -func initial(list interface{}) []interface{} { - l, err := mustInitial(list) - if err != nil { - panic(err) - } - - return l -} - -func mustInitial(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 0; i < l-1; i++ { - nl[i] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find initial on type %s", tp) - } -} - -func sortAlpha(list interface{}) []string { - k := reflect.Indirect(reflect.ValueOf(list)).Kind() - switch k { - case reflect.Slice, reflect.Array: - a := strslice(list) - s := sort.StringSlice(a) - s.Sort() - return s - } - return []string{strval(list)} -} - -func reverse(v interface{}) []interface{} { - l, err := mustReverse(v) - if err != nil { - panic(err) - } - - return l -} - -func mustReverse(v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(v).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(v) - - l := l2.Len() - // We do not sort in place because the incoming array should not be altered. - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[l-i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find reverse on type %s", tp) - } -} - -func compact(list interface{}) []interface{} { - l, err := mustCompact(list) - if err != nil { - panic(err) - } - - return l -} - -func mustCompact(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !empty(item) { - nl = append(nl, item) - } - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot compact on type %s", tp) - } -} - -func uniq(list interface{}) []interface{} { - l, err := mustUniq(list) - if err != nil { - panic(err) - } - - return l -} - -func mustUniq(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - dest := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(dest, item) { - dest = append(dest, item) - } - } - - return dest, nil - default: - return nil, fmt.Errorf("Cannot find uniq on type %s", tp) - } -} - -func inList(haystack []interface{}, needle interface{}) bool { - for _, h := range haystack { - if reflect.DeepEqual(needle, h) { - return true - } - } - return false -} - -func without(list interface{}, omit ...interface{}) []interface{} { - l, err := mustWithout(list, omit...) 
- if err != nil { - panic(err) - } - - return l -} - -func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - res := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(omit, item) { - res = append(res, item) - } - } - - return res, nil - default: - return nil, fmt.Errorf("Cannot find without on type %s", tp) - } -} - -func has(needle interface{}, haystack interface{}) bool { - l, err := mustHas(needle, haystack) - if err != nil { - panic(err) - } - - return l -} - -func mustHas(needle interface{}, haystack interface{}) (bool, error) { - if haystack == nil { - return false, nil - } - tp := reflect.TypeOf(haystack).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(haystack) - var item interface{} - l := l2.Len() - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if reflect.DeepEqual(needle, item) { - return true, nil - } - } - - return false, nil - default: - return false, fmt.Errorf("Cannot find has on type %s", tp) - } -} - -// $list := [1, 2, 3, 4, 5] -// slice $list -> list[0:5] = list[:] -// slice $list 0 3 -> list[0:3] = list[:3] -// slice $list 3 5 -> list[3:5] -// slice $list 3 -> list[3:5] = list[3:] -func slice(list interface{}, indices ...interface{}) interface{} { - l, err := mustSlice(list, indices...) - if err != nil { - panic(err) - } - - return l -} - -func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - var start, end int - if len(indices) > 0 { - start = toInt(indices[0]) - } - if len(indices) < 2 { - end = l - } else { - end = toInt(indices[1]) - } - - return l2.Slice(start, end).Interface(), nil - default: - return nil, fmt.Errorf("list should be type of slice or array but %s", tp) - } -} - -func concat(lists ...interface{}) interface{} { - var res []interface{} - for _, list := range lists { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - for i := 0; i < l2.Len(); i++ { - res = append(res, l2.Index(i).Interface()) - } - default: - panic(fmt.Sprintf("Cannot concat type %s as list", tp)) - } - } - return res -} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go deleted file mode 100644 index 108d78a9..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/network.go +++ /dev/null @@ -1,12 +0,0 @@ -package sprig - -import ( - "math/rand" - "net" -) - -func getHostByName(name string) string { - addrs, _ := net.LookupHost(name) - //TODO: add error handing when release v3 comes out - return addrs[rand.Intn(len(addrs))] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go deleted file mode 100644 index f68e4182..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/numeric.go +++ /dev/null @@ -1,186 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "strconv" - "strings" - - "github.com/spf13/cast" - "github.com/shopspring/decimal" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - return cast.ToFloat64(v) -} - -func toInt(v interface{}) int { - return cast.ToInt(v) -} - -// toInt64 
converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - return cast.ToInt64(v) -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func maxf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Max(aa, bb) - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func minf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Min(aa, bb) - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, rOpt ...float64) float64 { - roundOn := .5 - if len(rOpt) > 0 { - roundOn = rOpt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} - -// converts unix octal to decimal -func toDecimal(v interface{}) int64 { - result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) - if err != nil { - return 0 - } - return result -} - -func seq(params ...int) string { - increment := 1 - switch len(params) { - case 0: - return "" - case 1: - start := 1 - end := params[0] - if end < start { - increment = -1 - } - return intArrayToString(untilStep(start, end+increment, increment), " ") - case 3: - start := params[0] - end := params[2] - step := params[1] - if end < start { - increment = -1 - if step > 0 { - return "" - } - } - return intArrayToString(untilStep(start, end+increment, step), " ") - case 2: - start := params[0] - end := params[1] - step := 1 - if end < start { - step = -1 - } - return intArrayToString(untilStep(start, end+step, step), " ") - default: - return "" - } -} - -func intArrayToString(slice []int, delimeter string) string { - return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") -} - -// performs a float and subsequent decimal.Decimal conversion on inputs, -// and iterates through a and b executing the mathmetical operation f -func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { - prt := decimal.NewFromFloat(toFloat64(a)) - for _, x := range b { - dx := decimal.NewFromFloat(toFloat64(x)) - prt = f(prt, dx) - } - rslt, _ := prt.Float64() - return rslt -} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go deleted file mode 100644 index 8a65c132..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/reflect.go +++ /dev/null @@ -1,28 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" -) - -// typeIs returns true if the src is the type named in target. 
-func typeIs(target string, src interface{}) bool { - return target == typeOf(src) -} - -func typeIsLike(target string, src interface{}) bool { - t := typeOf(src) - return target == t || "*"+target == t -} - -func typeOf(src interface{}) string { - return fmt.Sprintf("%T", src) -} - -func kindIs(target string, src interface{}) bool { - return target == kindOf(src) -} - -func kindOf(src interface{}) string { - return reflect.ValueOf(src).Kind().String() -} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go deleted file mode 100644 index fab55101..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/regex.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func mustRegexMatch(regex string, s string) (bool, error) { - return regexp.MatchString(regex, s) -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func mustRegexFindAll(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.FindAllString(s, n), nil -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func mustRegexFind(regex string, s string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.FindString(s), nil -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllString(s, repl), nil -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllLiteralString(s, repl), nil -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} - -func mustRegexSplit(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.Split(s, n), nil -} - -func regexQuoteMeta(s string) string { - return regexp.QuoteMeta(s) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go deleted file mode 100644 index 3fbe08aa..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver/v3" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go deleted file mode 100644 index e0ae628c..00000000 --- 
a/vendor/github.com/Masterminds/sprig/v3/strings.go +++ /dev/null @@ -1,236 +0,0 @@ -package sprig - -import ( - "encoding/base32" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "strings" - - util "github.com/Masterminds/goutils" -) - -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func base32encode(v string) string { - return base32.StdEncoding.EncodeToString([]byte(v)) -} - -func base32decode(v string) string { - data, err := base32.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} -func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. - return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.CryptoRandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.CryptoRandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.CryptoRandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.CryptoRandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - -func quote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("%q", strval(s))) - } - } - return strings.Join(out, " ") -} - -func squote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("'%v'", s)) - } - } - return strings.Join(out, " ") -} - -func cat(v ...interface{}) string { - v = removeNilElements(v) - r := strings.TrimSpace(strings.Repeat("%v ", len(v))) - return fmt.Sprintf(r, v...) 
-} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) -} - -func nindent(spaces int, v string) string { - return "\n" + indent(spaces, v) -} - -func replace(old, new, src string) string { - return strings.Replace(src, old, new, -1) -} - -func plural(one, many string, count int) string { - if count == 1 { - return one - } - return many -} - -func strslice(v interface{}) []string { - switch v := v.(type) { - case []string: - return v - case []interface{}: - b := make([]string, 0, len(v)) - for _, s := range v { - if s != nil { - b = append(b, strval(s)) - } - } - return b - default: - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Array, reflect.Slice: - l := val.Len() - b := make([]string, 0, l) - for i := 0; i < l; i++ { - value := val.Index(i).Interface() - if value != nil { - b = append(b, strval(value)) - } - } - return b - default: - if v == nil { - return []string{} - } - - return []string{strval(v)} - } - } -} - -func removeNilElements(v []interface{}) []interface{} { - newSlice := make([]interface{}, 0, len(v)) - for _, i := range v { - if i != nil { - newSlice = append(newSlice, i) - } - } - return newSlice -} - -func strval(v interface{}) string { - switch v := v.(type) { - case string: - return v - case []byte: - return string(v) - case error: - return v.Error() - case fmt.Stringer: - return v.String() - default: - return fmt.Sprintf("%v", v) - } -} - -func trunc(c int, s string) string { - if c < 0 && len(s)+c > 0 { - return s[len(s)+c:] - } - if c >= 0 && len(s) > c { - return s[:c] - } - return s -} - -func join(sep string, v interface{}) string { - return strings.Join(strslice(v), sep) -} - -func split(sep, orig string) map[string]string { - parts := strings.Split(orig, sep) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -func splitn(sep string, n int, orig string) map[string]string { - parts := strings.SplitN(orig, sep, n) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -// substring creates a substring of the given string. -// -// If start is < 0, this calls string[:end]. -// -// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] -// -// Otherwise, this calls string[start, end]. 
-func substring(start, end int, s string) string { - if start < 0 { - return s[:end] - } - if end < 0 || end > len(s) { - return s[start:] - } - return s[start:end] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go deleted file mode 100644 index b8e120e1..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/url.go +++ /dev/null @@ -1,66 +0,0 @@ -package sprig - -import ( - "fmt" - "net/url" - "reflect" -) - -func dictGetOrEmpty(dict map[string]interface{}, key string) string { - value, ok := dict[key] - if !ok { - return "" - } - tp := reflect.TypeOf(value).Kind() - if tp != reflect.String { - panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) - } - return reflect.ValueOf(value).String() -} - -// parses given URL to return dict object -func urlParse(v string) map[string]interface{} { - dict := map[string]interface{}{} - parsedURL, err := url.Parse(v) - if err != nil { - panic(fmt.Sprintf("unable to parse url: %s", err)) - } - dict["scheme"] = parsedURL.Scheme - dict["host"] = parsedURL.Host - dict["hostname"] = parsedURL.Hostname() - dict["path"] = parsedURL.Path - dict["query"] = parsedURL.RawQuery - dict["opaque"] = parsedURL.Opaque - dict["fragment"] = parsedURL.Fragment - if parsedURL.User != nil { - dict["userinfo"] = parsedURL.User.String() - } else { - dict["userinfo"] = "" - } - - return dict -} - -// join given dict to URL string -func urlJoin(d map[string]interface{}) string { - resURL := url.URL{ - Scheme: dictGetOrEmpty(d, "scheme"), - Host: dictGetOrEmpty(d, "host"), - Path: dictGetOrEmpty(d, "path"), - RawQuery: dictGetOrEmpty(d, "query"), - Opaque: dictGetOrEmpty(d, "opaque"), - Fragment: dictGetOrEmpty(d, "fragment"), - } - userinfo := dictGetOrEmpty(d, "userinfo") - var user *url.Userinfo - if userinfo != "" { - tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) - if err != nil { - panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) - } - user = tempURL.User - } - - resURL.User = user - return resURL.String() -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go new file mode 100644 index 00000000..7ac377ae --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go @@ -0,0 +1,308 @@ +//go:build windows +// +build windows + +package bindfilter + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./bind_filter.go +//sys bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter? +//sys bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) = bindfltapi.BfRemoveMapping? +//sys bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) = bindfltapi.BfGetMappings? + +// BfSetupFilter flags. 
See: +// https://github.com/microsoft/BuildXL/blob/a6dce509f0d4f774255e5fbfb75fa6d5290ed163/Public/Src/Utilities/Native/Processes/Windows/NativeContainerUtilities.cs#L193-L240 +// +//nolint:revive // var-naming: ALL_CAPS +const ( + BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001 + // Tells bindflt to fail mapping with STATUS_INVALID_PARAMETER if a mapping produces + // multiple targets. + BINDFLT_FLAG_NO_MULTIPLE_TARGETS uint32 = 0x00000040 +) + +//nolint:revive // var-naming: ALL_CAPS +const ( + BINDFLT_GET_MAPPINGS_FLAG_VOLUME uint32 = 0x00000001 + BINDFLT_GET_MAPPINGS_FLAG_SILO uint32 = 0x00000002 + BINDFLT_GET_MAPPINGS_FLAG_USER uint32 = 0x00000004 +) + +// ApplyFileBinding creates a global mount of the source in root, with an optional +// read only flag. +// The bind filter allows us to create mounts of directories and volumes. By default it allows +// us to mount multiple sources inside a single root, acting as an overlay. Files from the +// second source will superscede the first source that was mounted. +// This function disables this behavior and sets the BINDFLT_FLAG_NO_MULTIPLE_TARGETS flag +// on the mount. +func ApplyFileBinding(root, source string, readOnly bool) error { + // The parent directory needs to exist for the bind to work. MkdirAll stats and + // returns nil if the directory exists internally so we should be fine to mkdirall + // every time. + if err := os.MkdirAll(filepath.Dir(root), 0); err != nil { + return err + } + + if strings.Contains(source, "Volume{") && !strings.HasSuffix(source, "\\") { + // Add trailing slash to volumes, otherwise we get an error when binding it to + // a folder. + source = source + "\\" + } + + flags := BINDFLT_FLAG_NO_MULTIPLE_TARGETS + if readOnly { + flags |= BINDFLT_FLAG_READ_ONLY_MAPPING + } + + // Set the job handle to 0 to create a global mount. + if err := bfSetupFilter( + 0, + flags, + root, + source, + nil, + 0, + ); err != nil { + return fmt.Errorf("failed to bind target %q to root %q: %w", source, root, err) + } + return nil +} + +// RemoveFileBinding removes a mount from the root path. +func RemoveFileBinding(root string) error { + if err := bfRemoveMapping(0, root); err != nil { + return fmt.Errorf("removing file binding: %w", err) + } + return nil +} + +// GetBindMappings returns a list of bind mappings that have their root on a +// particular volume. The volumePath parameter can be any path that exists on +// a volume. For example, if a number of mappings are created in C:\ProgramData\test, +// to get a list of those mappings, the volumePath parameter would have to be set to +// C:\ or the VOLUME_NAME_GUID notation of C:\ (\\?\Volume{GUID}\), or any child +// path that exists. +func GetBindMappings(volumePath string) ([]BindMapping, error) { + rootPtr, err := windows.UTF16PtrFromString(volumePath) + if err != nil { + return nil, err + } + + flags := BINDFLT_GET_MAPPINGS_FLAG_VOLUME + // allocate a large buffer for results + var outBuffSize uint32 = 256 * 1024 + buf := make([]byte, outBuffSize) + + if err := bfGetMappings(flags, 0, rootPtr, nil, &outBuffSize, &buf[0]); err != nil { + return nil, err + } + + if outBuffSize < 12 { + return nil, fmt.Errorf("invalid buffer returned") + } + + result := buf[:outBuffSize] + + // The first 12 bytes are the three uint32 fields in getMappingsResponseHeader{} + headerBuffer := result[:12] + // The alternative to using unsafe and casting it to the above defined structures, is to manually + // parse the fields. Not too terrible, but not sure it'd worth the trouble. 
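For reference, a minimal usage sketch of the bindfilter API introduced in this file (not part of the vendored change; the paths are hypothetical, and this builds only on Windows where bindfltapi.dll is available):

    package main

    import (
        "fmt"
        "log"

        "github.com/Microsoft/go-winio/pkg/bindfilter"
    )

    func main() {
        // Mount C:\ProgramData\source at C:\ProgramData\mount as a read-only binding.
        if err := bindfilter.ApplyFileBinding(`C:\ProgramData\mount`, `C:\ProgramData\source`, true); err != nil {
            log.Fatal(err)
        }
        defer bindfilter.RemoveFileBinding(`C:\ProgramData\mount`)

        // Enumerate the bind mappings rooted on the C:\ volume.
        mappings, err := bindfilter.GetBindMappings(`C:\`)
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range mappings {
            fmt.Println(m.MountPoint, "->", m.Targets)
        }
    }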
+ header := *(*getMappingsResponseHeader)(unsafe.Pointer(&headerBuffer[0])) + + if header.MappingCount == 0 { + // no mappings + return []BindMapping{}, nil + } + + mappingsBuffer := result[12 : int(unsafe.Sizeof(mappingEntry{}))*int(header.MappingCount)] + // Get a pointer to the first mapping in the slice + mappingsPointer := (*mappingEntry)(unsafe.Pointer(&mappingsBuffer[0])) + // Get slice of mappings + mappings := unsafe.Slice(mappingsPointer, header.MappingCount) + + mappingEntries := make([]BindMapping, header.MappingCount) + for i := 0; i < int(header.MappingCount); i++ { + bindMapping, err := getBindMappingFromBuffer(result, mappings[i]) + if err != nil { + return nil, fmt.Errorf("fetching bind mappings: %w", err) + } + mappingEntries[i] = bindMapping + } + + return mappingEntries, nil +} + +// mappingEntry holds information about where in the response buffer we can +// find information about the virtual root (the mount point) and the targets (sources) +// that get mounted, as well as the flags used to bind the targets to the virtual root. +type mappingEntry struct { + VirtRootLength uint32 + VirtRootOffset uint32 + Flags uint32 + NumberOfTargets uint32 + TargetEntriesOffset uint32 +} + +type mappingTargetEntry struct { + TargetRootLength uint32 + TargetRootOffset uint32 +} + +// getMappingsResponseHeader represents the first 12 bytes of the BfGetMappings() response. +// It gives us the size of the buffer, the status of the call and the number of mappings. +// A response +type getMappingsResponseHeader struct { + Size uint32 + Status uint32 + MappingCount uint32 +} + +type BindMapping struct { + MountPoint string + Flags uint32 + Targets []string +} + +func decodeEntry(buffer []byte) (string, error) { + name := make([]uint16, len(buffer)/2) + err := binary.Read(bytes.NewReader(buffer), binary.LittleEndian, &name) + if err != nil { + return "", fmt.Errorf("decoding name: %w", err) + } + return windows.UTF16ToString(name), nil +} + +func getTargetsFromBuffer(buffer []byte, offset, count int) ([]string, error) { + if len(buffer) < offset+count*6 { + return nil, fmt.Errorf("invalid buffer") + } + + targets := make([]string, count) + for i := 0; i < count; i++ { + entryBuf := buffer[offset+i*8 : offset+i*8+8] + tgt := *(*mappingTargetEntry)(unsafe.Pointer(&entryBuf[0])) + if len(buffer) < int(tgt.TargetRootOffset)+int(tgt.TargetRootLength) { + return nil, fmt.Errorf("invalid buffer") + } + decoded, err := decodeEntry(buffer[tgt.TargetRootOffset : tgt.TargetRootOffset+tgt.TargetRootLength]) + if err != nil { + return nil, fmt.Errorf("decoding name: %w", err) + } + decoded, err = getFinalPath(decoded) + if err != nil { + return nil, fmt.Errorf("fetching final path: %w", err) + } + + targets[i] = decoded + } + return targets, nil +} + +func getFinalPath(pth string) (string, error) { + // BfGetMappings returns VOLUME_NAME_NT paths like \Device\HarddiskVolume2\ProgramData. + // These can be accessed by prepending \\.\GLOBALROOT to the path. We use this to get the + // DOS paths for these files. 
+ if strings.HasPrefix(pth, `\Device`) { + pth = `\\.\GLOBALROOT` + pth + } + + han, err := openPath(pth) + if err != nil { + return "", fmt.Errorf("fetching file handle: %w", err) + } + defer func() { + _ = windows.CloseHandle(han) + }() + + buf := make([]uint16, 100) + var flags uint32 = 0x0 + for { + n, err := windows.GetFinalPathNameByHandle(han, &buf[0], uint32(len(buf)), flags) + if err != nil { + // if we mounted a volume that does not also have a drive letter assigned, attempting to + // fetch the VOLUME_NAME_DOS will fail with os.ErrNotExist. Attempt to get the VOLUME_NAME_GUID. + if errors.Is(err, os.ErrNotExist) && flags != 0x1 { + flags = 0x1 + continue + } + return "", fmt.Errorf("getting final path name: %w", err) + } + if n < uint32(len(buf)) { + break + } + buf = make([]uint16, n) + } + finalPath := syscall.UTF16ToString(buf) + // We got VOLUME_NAME_DOS, we need to strip away some leading slashes. + // Leave unchanged if we ended up requesting VOLUME_NAME_GUID + if len(finalPath) > 4 && finalPath[:4] == `\\?\` && flags == 0x0 { + finalPath = finalPath[4:] + if len(finalPath) > 3 && finalPath[:3] == `UNC` { + // return path like \\server\share\... + finalPath = `\` + finalPath[3:] + } + } + + return finalPath, nil +} + +func getBindMappingFromBuffer(buffer []byte, entry mappingEntry) (BindMapping, error) { + if len(buffer) < int(entry.VirtRootOffset)+int(entry.VirtRootLength) { + return BindMapping{}, fmt.Errorf("invalid buffer") + } + + src, err := decodeEntry(buffer[entry.VirtRootOffset : entry.VirtRootOffset+entry.VirtRootLength]) + if err != nil { + return BindMapping{}, fmt.Errorf("decoding entry: %w", err) + } + targets, err := getTargetsFromBuffer(buffer, int(entry.TargetEntriesOffset), int(entry.NumberOfTargets)) + if err != nil { + return BindMapping{}, fmt.Errorf("fetching targets: %w", err) + } + + src, err = getFinalPath(src) + if err != nil { + return BindMapping{}, fmt.Errorf("fetching final path: %w", err) + } + + return BindMapping{ + Flags: entry.Flags, + Targets: targets, + MountPoint: src, + }, nil +} + +func openPath(path string) (windows.Handle, error) { + u16, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + h, err := windows.CreateFile( + u16, + 0, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, + windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS, // Needed to open a directory handle. + 0) + if err != nil { + return 0, &os.PathError{ + Op: "CreateFile", + Path: path, + Err: err, + } + } + return h, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go new file mode 100644 index 00000000..45c45c96 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go @@ -0,0 +1,116 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package bindfilter + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") + + procBfGetMappings = modbindfltapi.NewProc("BfGetMappings") + procBfRemoveMapping = modbindfltapi.NewProc("BfRemoveMapping") + procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter") +) + +func bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) { + hr = procBfGetMappings.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfGetMappings.Addr(), 6, uintptr(flags), uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(bufferSize)), uintptr(unsafe.Pointer(outBuffer))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(virtRootPath) + if hr != nil { + return + } + return _bfRemoveMapping(jobHandle, _p0) +} + +func _bfRemoveMapping(jobHandle windows.Handle, virtRootPath *uint16) (hr error) { + hr = procBfRemoveMapping.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procBfRemoveMapping.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(virtRootPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(virtTargetPath) + if hr != nil { + return + } + return _bfSetupFilter(jobHandle, flags, _p0, _p1, virtExceptions, virtExceptionPathCount) +} + +func _bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + hr = procBfSetupFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index 742c76d8..d8eb30b8 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . rm -rf rootfs --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake --include deps/cmd/hooks/wait-paths.gomake --include deps/cmd/tar2ext4.gomake --include deps/internal/tools/snp-report.gomake - -# Implicit rule for includes that define Go targets. 
-%.gomake: $(SRCROOT)/Makefile +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ + GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o @mkdir -p bin diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index e437e297..65025f3f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -421,12 +421,6 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) { return makeProcessError(process, operation, ErrAlreadyClosed, nil) } - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stdin == nil { - return nil - } - //HcsModifyProcess request to close stdin will fail if the process has already exited if !process.stopped() { modifyRequest := processModifyRequest{ @@ -448,8 +442,12 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) { } } - process.stdin.Close() - process.stdin = nil + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go new file mode 100644 index 00000000..4b650033 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -0,0 +1,85 @@ +package log + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net" + "reflect" + "time" + + "github.com/containerd/containerd/log" +) + +const TimeFormat = log.RFC3339NanoFixed + +func FormatTime(t time.Time) string { + return t.Format(TimeFormat) +} + +// DurationFormat formats a [time.Duration] log entry. +// +// A nil value signals an error with the formatting. +type DurationFormat func(time.Duration) interface{} + +func DurationFormatString(d time.Duration) interface{} { return d.String() } +func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() } +func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() } + +// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`. +// +// See FormatEnabled for more information. 
+func FormatIO(ctx context.Context, v interface{}) string { + m := make(map[string]string) + m["type"] = reflect.TypeOf(v).String() + + switch t := v.(type) { + case net.Conn: + m["localAddress"] = formatAddr(t.LocalAddr()) + m["remoteAddress"] = formatAddr(t.RemoteAddr()) + case interface{ Addr() net.Addr }: + m["address"] = formatAddr(t.Addr()) + default: + return Format(ctx, t) + } + + return Format(ctx, m) +} + +func formatAddr(a net.Addr) string { + return a.Network() + "://" + a.String() +} + +// Format formats an object into a JSON string, without any indentation or +// HTML escapes. +// Context is used to output a log warning if the conversion fails. +// +// This is intended primarily for `trace.StringAttribute()` +func Format(ctx context.Context, v interface{}) string { + b, err := encode(v) + if err != nil { + G(ctx).WithError(err).Warning("could not format value") + return "" + } + + return string(b) +} + +func encode(v interface{}) ([]byte, error) { + return encodeBuffer(&bytes.Buffer{}, v) +} + +func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) { + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + if err := enc.Encode(v); err != nil { + err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err) + return nil, err + } + + // encoder.Encode appends a newline to the end + return bytes.TrimSpace(buf.Bytes()), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go index 8f894059..94c6d091 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go @@ -1,23 +1,58 @@ package log import ( + "bytes" + "reflect" + "time" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) -// Hook serves to intercept and format `logrus.Entry`s before they are passed -// to the ETW hook. +const nullString = "null" + +// Hook intercepts and formats a [logrus.Entry] before it is logged. // -// The containerd shim discards the (formatted) logrus output, and outputs only via ETW. -// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and -// then re-output via the ETW hook. -type Hook struct{} +// The shim either outputs the logs through an ETW hook, discarding the (formatted) output +// or logs output to a pipe for logging binaries to consume. +// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output +// by the shim. +type Hook struct { + // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON. + // Variables of [bytes.Buffer] will be converted to []byte. + // + // Default is false. + EncodeAsJSON bool + + // TimeFormat specifies the format for [time.Time] variables. + // An empty string disables formatting. + // When disabled, the fall back will be the JSON encoding, if enabled. + // + // Default is [github.com/containerd/containerd/log.RFC3339NanoFixed]. + TimeFormat string + + // DurationFormat converts a [time.Duration] field to an appropriate encoding. + // nil disables formatting. + // When disabled, the fall back will be the JSON encoding, if enabled. + // + // Default is [DurationFormatString], which appends a duration unit after the value.
+ DurationFormat DurationFormat + + // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to + // the entry from the span context stored in [logrus.Entry.Context], if it exists. + AddSpanContext bool +} var _ logrus.Hook = &Hook{} func NewHook() *Hook { - return &Hook{} + return &Hook{ + TimeFormat: log.RFC3339NanoFixed, + DurationFormat: DurationFormatString, + AddSpanContext: true, + } } func (h *Hook) Levels() []logrus.Level { @@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level { } func (h *Hook) Fire(e *logrus.Entry) (err error) { + // JSON encode, if necessary, then add span information + h.encode(e) h.addSpanContext(e) return nil } +// encode loops through all the fields in the [logrus.Entry] and encodes them according to +// the settings in [Hook]. +// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for +// fields of type [time.Time]. +// +// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or +// errors will be encoded via a [json.Marshal] (with HTML escaping disabled). +// Chanel- and function-typed fields, as well as unsafe pointers are left alone and not encoded. +// +// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false, +// then this is a no-op. +func (h *Hook) encode(e *logrus.Entry) { + d := e.Data + + formatTime := h.TimeFormat != "" + formatDuration := h.DurationFormat != nil + if !(h.EncodeAsJSON || formatTime || formatDuration) { + return + } + + for k, v := range d { + // encode types with dedicated formatting options first + + if vv, ok := v.(time.Time); formatTime && ok { + d[k] = vv.Format(h.TimeFormat) + continue + } + + if vv, ok := v.(time.Duration); formatDuration && ok { + d[k] = h.DurationFormat(vv) + continue + } + + // general case JSON encoding + + if !h.EncodeAsJSON { + continue + } + + switch vv := v.(type) { + // built in types + // "json" marshals errors as "{}", so leave alone here + case bool, string, error, uintptr, + int8, int16, int32, int64, int, + uint8, uint32, uint64, uint, + float32, float64: + continue + + // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it + // may be a binary payload and not representable as a string. + // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`, + // so cannot use `vv.Bytes`. + // Could move to below the `reflect.Indirect()` call below, but + // that would require additional typematching and dereferencing. + // Easier to keep these duplicate branches here. + case bytes.Buffer: + v = vv.Bytes() + case *bytes.Buffer: + v = vv.Bytes() + } + + // dereference pointer or interface variables + rv := reflect.Indirect(reflect.ValueOf(v)) + // check if `v` is a null pointer + if !rv.IsValid() { + d[k] = nullString + continue + } + + switch rv.Kind() { + case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice: + default: + // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal + // Chan, Func: not supported by json + // Interface, Pointer: dereferenced above + // UnsafePointer: not supported by json, not safe to de-reference; leave alone + continue + } + + b, err := encode(v) + if err != nil { + // Errors are written to stderr (ie, to `panic.log`) and stops the remaining + // hooks (ie, exporting to ETW) from firing. So add encoding errors to + // the entry data to be written out, but keep on processing. 
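For reference, a minimal sketch of how the reworked hook could be wired up (hypothetical; the log package is internal to hcsshim, so this compiles only from within that module):

    package main

    import (
        "time"

        "github.com/Microsoft/hcsshim/internal/log"
        "github.com/sirupsen/logrus"
    )

    func main() {
        h := log.NewHook() // TimeFormat, DurationFormat, and AddSpanContext come pre-populated
        h.EncodeAsJSON = true
        logrus.AddHook(h)

        logrus.WithFields(logrus.Fields{
            "elapsed": 1500 * time.Millisecond, // rendered by DurationFormatString as "1.5s"
            "at":      time.Now(),              // rendered with the RFC3339NanoFixed layout
        }).Info("operation complete")
    }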
+ d[k+"-"+logrus.ErrorKey] = err.Error() + // keep the original `v` as the value, + continue + } + d[k] = string(b) + } +} + func (h *Hook) addSpanContext(e *logrus.Entry) { ctx := e.Context - if ctx == nil { + if !h.AddSpanContext || ctx == nil { return } span := trace.FromContext(ctx) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index d51e0fd8..d1ef1509 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "errors" - "strings" "sync/atomic" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" @@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) { } pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - buf := bytes.NewBuffer(b[:0]) - if err := encode(buf, pp); err != nil { + b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp) + if err != nil { return "", err } - return strings.TrimSpace(buf.String()), nil + return string(b), nil } // ScrubBridgeCreate scrubs requests sent over the bridge of type @@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { return nil, err } - buf := &bytes.Buffer{} - if err := encode(buf, m); err != nil { + b, err := encode(m) + if err != nil { return nil, err } - return bytes.TrimSpace(buf.Bytes()), nil -} - -func encode(buf *bytes.Buffer, v interface{}) error { - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - return nil + return b, nil } func isRequestBase(m genMap) bool { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go new file mode 100644 index 00000000..71df25b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -0,0 +1,69 @@ +package oc + +import ( + "errors" + "io" + "net" + "os" + + "github.com/containerd/containerd/errdefs" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there +// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error) + +func toStatusCode(err error) codes.Code { + // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, + // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // context timeout or cancelled error + if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { + return s.Code() + } + + switch { + // case isAny(err): + // return codes.Cancelled + case isAny(err, os.ErrInvalid): + return codes.InvalidArgument + case isAny(err, os.ErrDeadlineExceeded): + return codes.DeadlineExceeded + case isAny(err, os.ErrNotExist): + return codes.NotFound + case isAny(err, os.ErrExist): + return codes.AlreadyExists + case isAny(err, os.ErrPermission): + return codes.PermissionDenied + // case isAny(err): + // return codes.ResourceExhausted + case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer): + return codes.FailedPrecondition + // case isAny(err): + // return codes.Aborted + // case isAny(err): + // return codes.OutOfRange + // case isAny(err): + // return codes.Unimplemented + case isAny(err, io.ErrNoProgress): + return codes.Internal + // case isAny(err): + // return codes.Unavailable + case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF): 
+ return codes.DataLoss + // case isAny(err): + // return codes.Unauthenticated + default: + return codes.Unknown + } +} + +// isAny returns true if errors.Is is true for any of the provided errors, errs. +func isAny(err error, errs ...error) bool { + for _, e := range errs { + if errors.Is(err, e) { + return true + } + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go index f428bdaf..28f8f43a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -3,19 +3,26 @@ package oc import ( "github.com/sirupsen/logrus" "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" ) -var _ = (trace.Exporter)(&LogrusExporter{}) +const spanMessage = "Span" + +var _errorCodeKey = logrus.ErrorKey + "Code" // LogrusExporter is an OpenCensus `trace.Exporter` that exports // `trace.SpanData` to logrus output. -type LogrusExporter struct { -} +type LogrusExporter struct{} + +var _ trace.Exporter = &LogrusExporter{} // ExportSpan exports `s` based on the the following rules: // -// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, -// `s.ParentSpanID` for correlation +// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`, +// `s.SpanID`, and `s.ParentSpanID` for correlation // // 2. Any calls to .Annotate will not be supported. // @@ -23,21 +30,57 @@ type LogrusExporter struct { // `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` // providing `s.Status.Message` as the error value. func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { - // Combine all span annotations with traceID, spanID, parentSpanID - baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) - baseEntry.Data["traceID"] = s.TraceID.String() - baseEntry.Data["spanID"] = s.SpanID.String() - baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() - baseEntry.Data["startTime"] = s.StartTime - baseEntry.Data["endTime"] = s.EndTime - baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() - baseEntry.Data["name"] = s.Name - baseEntry.Time = s.StartTime + if s.DroppedAnnotationCount > 0 { + logrus.WithFields(logrus.Fields{ + "name": s.Name, + logfields.TraceID: s.TraceID.String(), + logfields.SpanID: s.SpanID.String(), + "dropped": s.DroppedAttributeCount, + "maxAttributes": len(s.Attributes), + }).Warning("span had dropped attributes") + } + + entry := log.L.Dup() + // Combine all span annotations with span data (eg, trace ID, span ID, parent span ID, + // error, status code) + // (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we can + // can skip overhead in entry.WithFields() and add them directly to entry.Data. 
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries + data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10) + + // Default log entry may have prexisting/application-wide data + for k, v := range entry.Data { + data[k] = v + } + for k, v := range s.Attributes { + data[k] = v + } + + data[logfields.Name] = s.Name + data[logfields.TraceID] = s.TraceID.String() + data[logfields.SpanID] = s.SpanID.String() + data[logfields.ParentSpanID] = s.ParentSpanID.String() + data[logfields.StartTime] = s.StartTime + data[logfields.EndTime] = s.EndTime + data[logfields.Duration] = s.EndTime.Sub(s.StartTime) + if sk := spanKindToString(s.SpanKind); sk != "" { + data["spanKind"] = sk + } level := logrus.InfoLevel if s.Status.Code != 0 { level = logrus.ErrorLevel - baseEntry.Data[logrus.ErrorKey] = s.Status.Message + + // don't overwrite an existing "error" or "errorCode" attributes + if _, ok := data[logrus.ErrorKey]; !ok { + data[logrus.ErrorKey] = s.Status.Message + } + if _, ok := data[_errorCodeKey]; !ok { + data[_errorCodeKey] = codes.Code(s.Status.Code).String() + } } - baseEntry.Log(level, "Span") + + entry.Data = data + entry.Time = s.StartTime + entry.Log(level, spanMessage) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go index 0e2b7e9b..72607843 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample() func SetSpanStatus(span *trace.Span, err error) { status := trace.Status{} if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown + status.Code = int32(toStatusCode(err)) status.Message = err.Error() } span.SetStatus(status) @@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) + +func spanKindToString(sk int) string { + switch sk { + case trace.SpanKindClient: + return "client" + case trace.SpanKindServer: + return "server" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go index bfcc1576..7dfa1e59 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go @@ -23,20 +23,14 @@ type ( ) type explicitAccess struct { - //nolint:structcheck accessPermissions accessMask - //nolint:structcheck - accessMode accessMode - //nolint:structcheck - inheritance inheritMode - //nolint:structcheck - trustee trustee + accessMode accessMode + inheritance inheritMode + trustee trustee } type trustee struct { - //nolint:unused,structcheck - multipleTrustee *trustee - //nolint:unused,structcheck + multipleTrustee *trustee multipleTrusteeOperation int32 trusteeForm trusteeForm trusteeType trusteeType diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go new file mode 100644 index 00000000..f8d411ad --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -0,0 +1,35 @@ +package osversion + +// List of stable ABI compliant ltsc releases +// 
Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + V21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. +// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func CheckHostAndContainerCompat(host, ctr OSVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < V21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/anchore/stereoscope/pkg/image/oci/registry_provider.go b/vendor/github.com/anchore/stereoscope/pkg/image/oci/registry_provider.go index 9c71eb17..208c4a24 100644 --- a/vendor/github.com/anchore/stereoscope/pkg/image/oci/registry_provider.go +++ b/vendor/github.com/anchore/stereoscope/pkg/image/oci/registry_provider.go @@ -2,6 +2,7 @@ package oci import ( "context" + "crypto/tls" "fmt" "net/http" @@ -126,10 +127,15 @@ func prepareRemoteOptions(ctx context.Context, ref name.Reference, registryOptio if err != nil { log.Warn("unable to configure TLS transport: %w", err) } else if tlsConfig != nil { - options = append(options, remote.WithTransport(&http.Transport{ - TLSClientConfig: tlsConfig, - })) + options = append(options, remote.WithTransport(getTransport(tlsConfig))) } return options } + +func getTransport(tlsConfig *tls.Config) *http.Transport { + // use the default transport to inherit existing default options (including proxy options) + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConfig + return transport +} diff --git a/vendor/github.com/anchore/syft/internal/buffered_seek_reader.go b/vendor/github.com/anchore/syft/internal/buffered_seek_reader.go new file mode 100644 index 00000000..93e7e981 --- /dev/null +++ b/vendor/github.com/anchore/syft/internal/buffered_seek_reader.go @@ -0,0 +1,84 @@ +package internal + +import ( + "bytes" + "errors" + "io" + + "github.com/anchore/syft/internal/log" +) + +var _ io.ReadSeekCloser = (*bufferedSeekReader)(nil) + +// bufferedSeekReader wraps an io.ReadCloser to provide io.Seeker functionality. +// It only supports seeking from the start and cannot seek past what has already been read. 
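For reference, a short sketch of the seek semantics described above (hypothetical; the package is internal to syft, so this compiles only from within that module):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/anchore/syft/internal"
    )

    func main() {
        rc := io.NopCloser(strings.NewReader("hello world"))
        rs := internal.NewBufferedSeeker(rc)

        buf := make([]byte, 5)
        _, _ = rs.Read(buf)             // reads "hello"; the bytes are retained in the buffer
        _, _ = rs.Seek(0, io.SeekStart) // rewinding over already-read data is allowed

        _, err := rs.Seek(100, io.SeekStart) // seeking past what has been read is not
        fmt.Println(err)
    }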
+type bufferedSeekReader struct { + r io.ReadCloser + buf *bytes.Reader + data []byte + pos int64 + closed bool +} + +func NewBufferedSeeker(rc io.ReadCloser) io.ReadSeekCloser { + return &bufferedSeekReader{ + r: rc, + } +} + +func (bs *bufferedSeekReader) Read(p []byte) (int, error) { + if bs.closed { + return 0, errors.New("cannot read from closed reader") + } + if bs.pos == int64(len(bs.data)) { + // if we're at the end of our buffer, read more data into it + tmp := make([]byte, len(p)) + + n, err := bs.r.Read(tmp) + if err != nil && err != io.EOF { + return 0, err + } else if err == io.EOF { + bs.closed = true + } + bs.data = append(bs.data, tmp[:n]...) + bs.buf = bytes.NewReader(bs.data) + } + + n, err := bs.buf.ReadAt(p, bs.pos) + if err != nil && err != io.EOF { + log.WithFields("error", err).Trace("buffered seek reader failed to read from underlying reader") + } + bs.pos += int64(n) + + return n, nil +} + +func (bs *bufferedSeekReader) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = bs.pos + offset + case io.SeekEnd: + return 0, errors.New("'SeekEnd' not supported") + default: + return 0, errors.New("invalid seek option") + } + + if abs < 0 { + return 0, errors.New("unable to seek before start") + } + + if abs > int64(len(bs.data)) { + return 0, errors.New("unable to seek past read data") + } + + bs.pos = abs + return bs.pos, nil +} + +func (bs *bufferedSeekReader) Close() error { + bs.closed = true + return bs.r.Close() +} diff --git a/vendor/github.com/anchore/syft/internal/constants.go b/vendor/github.com/anchore/syft/internal/constants.go index 0f7c9cc0..ba9730a7 100644 --- a/vendor/github.com/anchore/syft/internal/constants.go +++ b/vendor/github.com/anchore/syft/internal/constants.go @@ -3,5 +3,5 @@ package internal const ( // JSONSchemaVersion is the current schema version output by the JSON encoder // This is roughly following the "SchemaVer" guidelines for versioning the JSON schema. Please see schema/json/README.md for details on how to increment. - JSONSchemaVersion = "11.0.1" + JSONSchemaVersion = "12.0.1" ) diff --git a/vendor/github.com/anchore/syft/internal/file/zip_file_manifest.go b/vendor/github.com/anchore/syft/internal/file/zip_file_manifest.go index 04ea442c..6306ca6b 100644 --- a/vendor/github.com/anchore/syft/internal/file/zip_file_manifest.go +++ b/vendor/github.com/anchore/syft/internal/file/zip_file_manifest.go @@ -40,15 +40,18 @@ func (z ZipFileManifest) Add(entry string, info os.FileInfo) { } // GlobMatch returns the path keys that match the given value(s). -func (z ZipFileManifest) GlobMatch(patterns ...string) []string { +func (z ZipFileManifest) GlobMatch(caseInsensitive bool, patterns ...string) []string { uniqueMatches := strset.New() for _, pattern := range patterns { for entry := range z { // We want to match globs as if entries begin with a leading slash (akin to an absolute path) // so that glob logic is consistent inside and outside of ZIP archives - normalizedEntry := normalizeZipEntryName(entry) + normalizedEntry := normalizeZipEntryName(caseInsensitive, entry) + if caseInsensitive { + pattern = strings.ToLower(pattern) + } if GlobMatch(pattern, normalizedEntry) { uniqueMatches.Add(entry) } @@ -62,7 +65,10 @@ func (z ZipFileManifest) GlobMatch(patterns ...string) []string { } // normalizeZipEntryName takes the given path entry and ensures it is prefixed with "/". 
-func normalizeZipEntryName(entry string) string { +func normalizeZipEntryName(caseInsensitive bool, entry string) string { + if caseInsensitive { + entry = strings.ToLower(entry) + } if !strings.HasPrefix(entry, "/") { return "/" + entry } diff --git a/vendor/github.com/anchore/syft/internal/licenses/list.go b/vendor/github.com/anchore/syft/internal/licenses/list.go index d3d2c20f..46d63f88 100644 --- a/vendor/github.com/anchore/syft/internal/licenses/list.go +++ b/vendor/github.com/anchore/syft/internal/licenses/list.go @@ -20,7 +20,6 @@ func FileNames() []string { "LICENSE", "LICENSE.md", "LICENSE.markdown", - "license.txt", "LICENSE.txt", "LICENSE-2.0.txt", "LICENCE-2.0.txt", diff --git a/vendor/github.com/anchore/syft/syft/encode_decode.go b/vendor/github.com/anchore/syft/syft/encode_decode.go deleted file mode 100644 index b06fb801..00000000 --- a/vendor/github.com/anchore/syft/syft/encode_decode.go +++ /dev/null @@ -1,18 +0,0 @@ -package syft - -import ( - "io" - - "github.com/anchore/syft/syft/formats" - "github.com/anchore/syft/syft/sbom" -) - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func Encode(s sbom.SBOM, f sbom.Format) ([]byte, error) { - return formats.Encode(s, f) -} - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func Decode(reader io.Reader) (*sbom.SBOM, sbom.Format, error) { - return formats.Decode(reader) -} diff --git a/vendor/github.com/anchore/syft/syft/file/location.go b/vendor/github.com/anchore/syft/syft/file/location.go index 65af91c5..25bc753b 100644 --- a/vendor/github.com/anchore/syft/syft/file/location.go +++ b/vendor/github.com/anchore/syft/syft/file/location.go @@ -10,7 +10,7 @@ import ( ) // Location represents a path relative to a particular filesystem resolved to a specific file.Reference. This struct is used as a key -// in content fetching to uniquely identify a file relative to a request (the VirtualPath). +// in content fetching to uniquely identify a file relative to a request (the AccessPath). type Location struct { LocationData `cyclonedx:""` LocationMetadata `cyclonedx:""` @@ -20,8 +20,8 @@ type LocationData struct { Coordinates `cyclonedx:""` // Empty string here means there is no intermediate property name, e.g. syft:locations:0:path without "coordinates" // note: it is IMPORTANT to ignore anything but the coordinates for a Location when considering the ID (hash value) // since the coordinates are the minimally correct ID for a location (symlinks should not come into play) - VirtualPath string `hash:"ignore" json:"-"` // The path to the file which may or may not have hardlinks / symlinks - ref file.Reference `hash:"ignore"` // The file reference relative to the stereoscope.FileCatalog that has more information about this location. + AccessPath string `hash:"ignore" json:"accessPath"` // The path to the file which may or may not have hardlinks / symlinks + ref file.Reference `hash:"ignore"` // The file reference relative to the stereoscope.FileCatalog that has more information about this location. 
} func (l LocationData) Reference() file.Reference { @@ -68,6 +68,7 @@ func NewLocation(realPath string) Location { Coordinates: Coordinates{ RealPath: realPath, }, + AccessPath: realPath, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -76,13 +77,13 @@ func NewLocation(realPath string) Location { } // NewVirtualLocation creates a new location for a path accessed by a virtual path (a path with a symlink or hardlink somewhere in the path) -func NewVirtualLocation(realPath, virtualPath string) Location { +func NewVirtualLocation(realPath, accessPath string) Location { return Location{ LocationData: LocationData{ Coordinates: Coordinates{ RealPath: realPath, }, - VirtualPath: virtualPath, + AccessPath: accessPath, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -94,6 +95,7 @@ func NewLocationFromCoordinates(coordinates Coordinates) Location { return Location{ LocationData: LocationData{ Coordinates: coordinates, + AccessPath: coordinates.RealPath, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -101,11 +103,11 @@ func NewLocationFromCoordinates(coordinates Coordinates) Location { } // NewVirtualLocationFromCoordinates creates a new location for the given Coordinates via a virtual path. -func NewVirtualLocationFromCoordinates(coordinates Coordinates, virtualPath string) Location { +func NewVirtualLocationFromCoordinates(coordinates Coordinates, accessPath string) Location { return Location{ LocationData: LocationData{ Coordinates: coordinates, - VirtualPath: virtualPath, + AccessPath: accessPath, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -113,7 +115,7 @@ func NewVirtualLocationFromCoordinates(coordinates Coordinates, virtualPath stri } // NewLocationFromImage creates a new Location representing the given path (extracted from the Reference) relative to the given image. -func NewLocationFromImage(virtualPath string, ref file.Reference, img *image.Image) Location { +func NewLocationFromImage(accessPath string, ref file.Reference, img *image.Image) Location { layer := img.FileCatalog.Layer(ref) return Location{ LocationData: LocationData{ @@ -121,8 +123,8 @@ func NewLocationFromImage(virtualPath string, ref file.Reference, img *image.Ima RealPath: string(ref.RealPath), FileSystemID: layer.Metadata.Digest, }, - VirtualPath: virtualPath, - ref: ref, + AccessPath: accessPath, + ref: ref, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -137,7 +139,8 @@ func NewLocationFromDirectory(responsePath string, ref file.Reference) Location Coordinates: Coordinates{ RealPath: responsePath, }, - ref: ref, + AccessPath: responsePath, + ref: ref, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -146,17 +149,14 @@ func NewLocationFromDirectory(responsePath string, ref file.Reference) Location } // NewVirtualLocationFromDirectory creates a new Location representing the given path (extracted from the Reference) relative to the given directory with a separate virtual access path. 
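For reference, a short sketch against syft's public file package showing the renamed field (values are illustrative, assuming the syft version vendored here):

    package main

    import (
        "fmt"

        "github.com/anchore/syft/syft/file"
    )

    func main() {
        // RealPath is the resolved target; AccessPath records how the file was requested
        // (e.g. via a symlink). The field was previously named VirtualPath.
        loc := file.NewVirtualLocation("/usr/lib/libssl.so.3", "/usr/lib/libssl.so")
        fmt.Println(loc.RealPath)   // /usr/lib/libssl.so.3
        fmt.Println(loc.AccessPath) // /usr/lib/libssl.so
    }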
-func NewVirtualLocationFromDirectory(responsePath, virtualResponsePath string, ref file.Reference) Location { - if responsePath == virtualResponsePath { - return NewLocationFromDirectory(responsePath, ref) - } +func NewVirtualLocationFromDirectory(responsePath, responseAccessPath string, ref file.Reference) Location { return Location{ LocationData: LocationData{ Coordinates: Coordinates{ RealPath: responsePath, }, - VirtualPath: virtualResponsePath, - ref: ref, + AccessPath: responseAccessPath, + ref: ref, }, LocationMetadata: LocationMetadata{ Annotations: map[string]string{}, @@ -164,9 +164,9 @@ func NewVirtualLocationFromDirectory(responsePath, virtualResponsePath string, r } } -func (l Location) AccessPath() string { - if l.VirtualPath != "" { - return l.VirtualPath +func (l Location) Path() string { + if l.AccessPath != "" { + return l.AccessPath } return l.RealPath } @@ -179,8 +179,8 @@ func (l Location) String() string { str += fmt.Sprintf("RealPath=%q", l.RealPath) - if l.VirtualPath != "" { - str += fmt.Sprintf(" VirtualPath=%q", l.VirtualPath) + if l.AccessPath != "" && l.AccessPath != l.RealPath { + str += fmt.Sprintf(" AccessPath=%q", l.AccessPath) } if l.FileSystemID != "" { @@ -191,6 +191,6 @@ func (l Location) String() string { func (l Location) Equals(other Location) bool { return l.RealPath == other.RealPath && - l.VirtualPath == other.VirtualPath && + l.AccessPath == other.AccessPath && l.FileSystemID == other.FileSystemID } diff --git a/vendor/github.com/anchore/syft/syft/file/locations.go b/vendor/github.com/anchore/syft/syft/file/locations.go index da298643..80a4503c 100644 --- a/vendor/github.com/anchore/syft/syft/file/locations.go +++ b/vendor/github.com/anchore/syft/syft/file/locations.go @@ -8,10 +8,10 @@ func (l Locations) Len() int { func (l Locations) Less(i, j int) bool { if l[i].RealPath == l[j].RealPath { - if l[i].VirtualPath == l[j].VirtualPath { + if l[i].AccessPath == l[j].AccessPath { return l[i].FileSystemID < l[j].FileSystemID } - return l[i].VirtualPath < l[j].VirtualPath + return l[i].AccessPath < l[j].AccessPath } return l[i].RealPath < l[j].RealPath } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/author.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/author.go similarity index 82% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/author.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/author.go index 31ebdde8..77826864 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/author.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/author.go @@ -10,9 +10,9 @@ import ( func encodeAuthor(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: return metadata.Author - case pkg.PythonPackageMetadata: + case pkg.PythonPackage: author := metadata.Author if metadata.AuthorEmail != "" { if author == "" { @@ -21,7 +21,7 @@ func encodeAuthor(p pkg.Package) string { author += fmt.Sprintf(" <%s>", metadata.AuthorEmail) } return author - case pkg.GemMetadata: + case pkg.RubyGemspec: if len(metadata.Authors) > 0 { return strings.Join(metadata.Authors, ",") } @@ -33,15 +33,15 @@ func encodeAuthor(p pkg.Package) string { func decodeAuthor(author string, metadata interface{}) { switch meta := metadata.(type) { - case *pkg.NpmPackageJSONMetadata: + case *pkg.NpmPackage: meta.Author = author - case 
*pkg.PythonPackageMetadata: + case *pkg.PythonPackage: parts := strings.SplitN(author, " <", 2) meta.Author = parts[0] if len(parts) > 1 { meta.AuthorEmail = strings.TrimSuffix(parts[1], ">") } - case *pkg.GemMetadata: + case *pkg.RubyGemspec: meta.Authors = strings.Split(author, ",") } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/component.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/component.go similarity index 82% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/component.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/component.go index d7c79875..6cef878a 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/component.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/component.go @@ -7,12 +7,22 @@ import ( "github.com/anchore/packageurl-go" "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/formats/common" + "github.com/anchore/syft/syft/format/common" + "github.com/anchore/syft/syft/internal/packagemetadata" "github.com/anchore/syft/syft/pkg" ) func encodeComponent(p pkg.Package) cyclonedx.Component { props := encodeProperties(p, "syft:package") + + if p.Metadata != nil { + // encode the metadataType as a property, something that doesn't exist on the core model + props = append(props, cyclonedx.Property{ + Name: "syft:package:metadataType", + Value: packagemetadata.JSONName(p.Metadata), + }) + } + props = append(props, encodeCPEs(p)...) locations := p.Locations.ToSlice() if len(locations) > 0 { @@ -85,9 +95,9 @@ func decodeComponent(c *cyclonedx.Component) *pkg.Package { common.DecodeInto(p, values, "syft:package", CycloneDXFields) - p.MetadataType = pkg.CleanMetadataType(p.MetadataType) + metadataType := values["syft:package:metadataType"] - p.Metadata = decodePackageMetadata(values, c, p.MetadataType) + p.Metadata = decodePackageMetadata(values, c, metadataType) if p.Type == "" { p.Type = pkg.TypeFromPURL(p.PURL) @@ -109,13 +119,13 @@ func decodeLocations(vals map[string]string) file.LocationSet { return file.NewLocationSet(out...) 
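encodeComponent above now records the concrete metadata type as a "syft:package:metadataType" property, and decodeComponent reads that same key back instead of the removed MetadataType field. A rough sketch of what such a property looks like on a CycloneDX component; the value string is a placeholder, since the real one comes from the internal packagemetadata.JSONName helper:

package main

import (
	"fmt"

	"github.com/CycloneDX/cyclonedx-go"
)

func main() {
	// placeholder value -- syft derives it from packagemetadata.JSONName(p.Metadata)
	metaType := cyclonedx.Property{
		Name:  "syft:package:metadataType",
		Value: "npm-package",
	}

	comp := cyclonedx.Component{
		Type:       cyclonedx.ComponentTypeLibrary,
		Name:       "left-pad",
		Version:    "1.3.0",
		Properties: &[]cyclonedx.Property{metaType},
	}
	fmt.Println(comp.Name, (*comp.Properties)[0].Name)
}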
} -func decodePackageMetadata(vals map[string]string, c *cyclonedx.Component, typ pkg.MetadataType) interface{} { - if typ != "" && c.Properties != nil { - metaTyp, ok := pkg.MetadataTypeByName[typ] - if !ok { +func decodePackageMetadata(vals map[string]string, c *cyclonedx.Component, typeName string) interface{} { + if typeName != "" && c.Properties != nil { + metadataType := packagemetadata.ReflectTypeFromJSONName(typeName) + if metadataType == nil { return nil } - metaPtrTyp := reflect.PtrTo(metaTyp) + metaPtrTyp := reflect.PtrTo(metadataType) metaPtr := common.Decode(metaPtrTyp, vals, "syft:metadata", CycloneDXFields) // Map all explicit metadata properties diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/cpe.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/cpe.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/cpe.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/cpe.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/decoder.go similarity index 84% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/decoder.go index 3400cf9e..37de22a9 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/decoder.go @@ -2,56 +2,18 @@ package cyclonedxhelpers import ( "fmt" - "io" - "strings" "github.com/CycloneDX/cyclonedx-go" "github.com/anchore/packageurl-go" "github.com/anchore/syft/syft/artifact" - "github.com/anchore/syft/syft/formats/common" + "github.com/anchore/syft/syft/format/common" "github.com/anchore/syft/syft/linux" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/sbom" "github.com/anchore/syft/syft/source" ) -const cycloneDXXmlSchema = "http://cyclonedx.org/schema/bom" - -func GetValidator(format cyclonedx.BOMFileFormat) sbom.Validator { - return func(reader io.Reader) error { - bom := &cyclonedx.BOM{} - err := cyclonedx.NewBOMDecoder(reader, format).Decode(bom) - if err != nil { - return err - } - - xmlWithoutNS := format == cyclonedx.BOMFileFormatXML && !strings.Contains(bom.XMLNS, cycloneDXXmlSchema) - xmlWithoutComponents := format == cyclonedx.BOMFileFormatXML && bom.Components == nil - if (cyclonedx.BOM{} == *bom || xmlWithoutComponents || xmlWithoutNS) { - return fmt.Errorf("not a valid CycloneDX document") - } - return nil - } -} - -func GetDecoder(format cyclonedx.BOMFileFormat) sbom.Decoder { - return func(reader io.Reader) (*sbom.SBOM, error) { - bom := &cyclonedx.BOM{ - Components: &[]cyclonedx.Component{}, - } - err := cyclonedx.NewBOMDecoder(reader, format).Decode(bom) - if err != nil { - return nil, err - } - s, err := ToSyftModel(bom) - if err != nil { - return nil, err - } - return s, nil - } -} - func ToSyftModel(bom *cyclonedx.BOM) (*sbom.SBOM, error) { if bom == nil { return nil, fmt.Errorf("no content defined in CycloneDX BOM") @@ -251,6 +213,12 @@ func extractComponents(meta *cyclonedx.Metadata) source.Description { switch c.Type { case cyclonedx.ComponentTypeContainer: + var labels map[string]string + + if meta.Properties != nil { + labels = decodeProperties(*meta.Properties, "syft:image:labels:") + } + return source.Description{ ID: "", // TODO: can we decode alias name-version 
somehow? (it isn't be encoded in the first place yet) @@ -259,6 +227,7 @@ func extractComponents(meta *cyclonedx.Metadata) source.Description { UserInput: c.Name, ID: c.BOMRef, ManifestDigest: c.Version, + Labels: labels, }, } case cyclonedx.ComponentTypeFile: diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/description.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/description.go similarity index 78% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/description.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/description.go index 36add799..daca390c 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/description.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/description.go @@ -5,9 +5,9 @@ import "github.com/anchore/syft/syft/pkg" func encodeDescription(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: return metadata.Description - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: return metadata.Description } } @@ -16,9 +16,9 @@ func encodeDescription(p pkg.Package) string { func decodeDescription(description string, metadata interface{}) { switch meta := metadata.(type) { - case *pkg.ApkMetadata: + case *pkg.ApkDBEntry: meta.Description = description - case *pkg.NpmPackageJSONMetadata: + case *pkg.NpmPackage: meta.Description = description } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/external_references.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/external_references.go similarity index 93% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/external_references.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/external_references.go index 4ba99587..fcee61b6 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/external_references.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/external_references.go @@ -19,21 +19,21 @@ func encodeExternalReferences(p pkg.Package) *[]cyclonedx.ExternalReference { // Skip adding extracted URL and Homepage metadata // as "external_reference" if the metadata isn't IRI-compliant switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: if metadata.URL != "" && isValidExternalRef(metadata.URL) { refs = append(refs, cyclonedx.ExternalReference{ URL: metadata.URL, Type: cyclonedx.ERTypeDistribution, }) } - case pkg.CargoPackageMetadata: + case pkg.RustCargoLockEntry: if metadata.Source != "" { refs = append(refs, cyclonedx.ExternalReference{ URL: metadata.Source, Type: cyclonedx.ERTypeDistribution, }) } - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: if metadata.URL != "" && isValidExternalRef(metadata.URL) { refs = append(refs, cyclonedx.ExternalReference{ URL: metadata.URL, @@ -46,14 +46,14 @@ func encodeExternalReferences(p pkg.Package) *[]cyclonedx.ExternalReference { Type: cyclonedx.ERTypeWebsite, }) } - case pkg.GemMetadata: + case pkg.RubyGemspec: if metadata.Homepage != "" && isValidExternalRef(metadata.Homepage) { refs = append(refs, cyclonedx.ExternalReference{ URL: metadata.Homepage, Type: cyclonedx.ERTypeWebsite, }) } - case pkg.JavaMetadata: + case pkg.JavaArchive: if len(metadata.ArchiveDigests) > 0 { for _, digest := range metadata.ArchiveDigests { refs = append(refs, 
cyclonedx.ExternalReference{ @@ -66,7 +66,7 @@ func encodeExternalReferences(p pkg.Package) *[]cyclonedx.ExternalReference { }) } } - case pkg.PythonPackageMetadata: + case pkg.PythonPackage: if metadata.DirectURLOrigin != nil && metadata.DirectURLOrigin.URL != "" { ref := cyclonedx.ExternalReference{ URL: metadata.DirectURLOrigin.URL, @@ -105,16 +105,16 @@ func decodeExternalReferences(c *cyclonedx.Component, metadata interface{}) { return } switch meta := metadata.(type) { - case *pkg.ApkMetadata: + case *pkg.ApkDBEntry: meta.URL = refURL(c, cyclonedx.ERTypeDistribution) - case *pkg.CargoPackageMetadata: + case *pkg.RustCargoLockEntry: meta.Source = refURL(c, cyclonedx.ERTypeDistribution) - case *pkg.NpmPackageJSONMetadata: + case *pkg.NpmPackage: meta.URL = refURL(c, cyclonedx.ERTypeDistribution) meta.Homepage = refURL(c, cyclonedx.ERTypeWebsite) - case *pkg.GemMetadata: + case *pkg.RubyGemspec: meta.Homepage = refURL(c, cyclonedx.ERTypeWebsite) - case *pkg.JavaMetadata: + case *pkg.JavaArchive: var digests []syftFile.Digest if ref := findExternalRef(c, cyclonedx.ERTypeBuildMeta); ref != nil { if ref.Hashes != nil { @@ -128,7 +128,7 @@ func decodeExternalReferences(c *cyclonedx.Component, metadata interface{}) { } meta.ArchiveDigests = digests - case *pkg.PythonPackageMetadata: + case *pkg.PythonPackage: if meta.DirectURLOrigin == nil { meta.DirectURLOrigin = &pkg.PythonDirectURLOriginInfo{} } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/format.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/format.go index f99e826d..b5d96487 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/format.go @@ -121,7 +121,8 @@ func toBomDescriptor(name, version string, srcMetadata source.Description) *cycl Version: version, }, }, - Component: toBomDescriptorComponent(srcMetadata), + Properties: toBomProperties(srcMetadata), + Component: toBomDescriptorComponent(srcMetadata), } } @@ -190,6 +191,15 @@ func toDependencies(relationships []artifact.Relationship) []cyclonedx.Dependenc return result } +func toBomProperties(srcMetadata source.Description) *[]cyclonedx.Property { + metadata, ok := srcMetadata.Metadata.(source.StereoscopeImageSourceMetadata) + if ok { + props := encodeProperties(metadata.Labels, "syft:image:labels") + return &props + } + return nil +} + func toBomDescriptorComponent(srcMetadata source.Description) *cyclonedx.Component { name := srcMetadata.Name version := srcMetadata.Version diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/group.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/group.go similarity index 63% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/group.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/group.go index 6e452aee..b3dd1cdd 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/group.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/group.go @@ -4,7 +4,7 @@ import "github.com/anchore/syft/syft/pkg" func encodeGroup(p pkg.Package) string { if hasMetadata(p) { - if metadata, ok := p.Metadata.(pkg.JavaMetadata); ok && metadata.PomProperties 
!= nil { + if metadata, ok := p.Metadata.(pkg.JavaArchive); ok && metadata.PomProperties != nil { return metadata.PomProperties.GroupID } } @@ -12,9 +12,9 @@ func encodeGroup(p pkg.Package) string { } func decodeGroup(group string, metadata interface{}) { - if meta, ok := metadata.(*pkg.JavaMetadata); ok { + if meta, ok := metadata.(*pkg.JavaArchive); ok { if meta.PomProperties == nil { - meta.PomProperties = &pkg.PomProperties{} + meta.PomProperties = &pkg.JavaPomProperties{} } meta.PomProperties.GroupID = group } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/licenses.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/licenses.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/licenses.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/licenses.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/properties.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/properties.go similarity index 50% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/properties.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/properties.go index ba630dae..b89d6d1b 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/properties.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/properties.go @@ -1,9 +1,11 @@ package cyclonedxhelpers import ( + "strings" + "github.com/CycloneDX/cyclonedx-go" - "github.com/anchore/syft/syft/formats/common" + "github.com/anchore/syft/syft/format/common" ) var ( @@ -19,3 +21,14 @@ func encodeProperties(obj interface{}, prefix string) (out []cyclonedx.Property) } return } + +func decodeProperties(properties []cyclonedx.Property, prefix string) map[string]string { + labels := make(map[string]string) + for _, property := range properties { + if strings.HasPrefix(property.Name, prefix) { + labelName := strings.TrimPrefix(property.Name, prefix) + labels[labelName] = property.Value + } + } + return labels +} diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/publisher.go b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/publisher.go similarity index 76% rename from vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/publisher.go rename to vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/publisher.go index 9b0c469b..96012dc9 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/publisher.go +++ b/vendor/github.com/anchore/syft/syft/format/common/cyclonedxhelpers/publisher.go @@ -7,11 +7,11 @@ import ( func encodePublisher(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: return metadata.Maintainer - case pkg.RpmMetadata: + case pkg.RpmDBEntry: return metadata.Vendor - case pkg.DpkgMetadata: + case pkg.DpkgDBEntry: return metadata.Maintainer } } @@ -20,11 +20,11 @@ func encodePublisher(p pkg.Package) string { func decodePublisher(publisher string, metadata interface{}) { switch meta := metadata.(type) { - case *pkg.ApkMetadata: + case *pkg.ApkDBEntry: meta.Maintainer = publisher - case *pkg.RpmMetadata: + case *pkg.RpmDBEntry: meta.Vendor = publisher - case *pkg.DpkgMetadata: + case *pkg.DpkgDBEntry: meta.Maintainer = publisher } } diff --git 
a/vendor/github.com/anchore/syft/syft/formats/common/property_encoder.go b/vendor/github.com/anchore/syft/syft/format/common/property_encoder.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/property_encoder.go rename to vendor/github.com/anchore/syft/syft/format/common/property_encoder.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/description.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/description.go similarity index 84% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/description.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/description.go index 8bad4797..088d7151 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/description.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/description.go @@ -5,9 +5,9 @@ import "github.com/anchore/syft/syft/pkg" func Description(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: return metadata.Description - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: return metadata.Description } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/document_name.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/document_name.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_namespace.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/document_namespace.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_namespace.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/document_namespace.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/download_location.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/download_location.go similarity index 89% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/download_location.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/download_location.go index 3ce24475..ea1ebab7 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/download_location.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/download_location.go @@ -16,11 +16,11 @@ func DownloadLocation(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: return NoneIfEmpty(metadata.URL) - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: return NoneIfEmpty(metadata.URL) - case pkg.NpmPackageLockJSONMetadata: + case pkg.NpmPackageLockEntry: return NoneIfEmpty(metadata.Resolved) } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/external_ref.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/external_ref.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/external_ref.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/external_ref.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/external_refs.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/external_refs.go 
similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/external_refs.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/external_refs.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/file_type.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/file_type.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/file_type.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/file_type.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/homepage.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/homepage.go similarity index 80% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/homepage.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/homepage.go index b790ba61..4974a9e3 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/homepage.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/homepage.go @@ -5,9 +5,9 @@ import "github.com/anchore/syft/syft/pkg" func Homepage(p pkg.Package) string { if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.GemMetadata: + case pkg.RubyGemspec: return metadata.Homepage - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: return metadata.Homepage } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/license.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/license.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/none_if_empty.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/none_if_empty.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/none_if_empty.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/none_if_empty.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/origintor.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/origintor.go similarity index 85% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/origintor.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/origintor.go index 7a6c14bf..810c6a59 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/origintor.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/origintor.go @@ -15,25 +15,25 @@ func Originator(p pkg.Package) (string, string) { author := "" if hasMetadata(p) { switch metadata := p.Metadata.(type) { - case pkg.ApkMetadata: + case pkg.ApkDBEntry: author = metadata.Maintainer - case pkg.NpmPackageJSONMetadata: + case pkg.NpmPackage: author = metadata.Author - case pkg.PythonPackageMetadata: + case pkg.PythonPackage: author = metadata.Author if author == "" { author = metadata.AuthorEmail } else if metadata.AuthorEmail != "" { author = fmt.Sprintf("%s (%s)", author, metadata.AuthorEmail) } - case pkg.GemMetadata: + case pkg.RubyGemspec: if len(metadata.Authors) > 0 { author = metadata.Authors[0] } - case pkg.RpmMetadata: + case pkg.RpmDBEntry: typ = "Organization" author = metadata.Vendor - case pkg.DpkgMetadata: + case pkg.DpkgDBEntry: author = metadata.Maintainer } if typ == "" && 
author != "" { diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/relationship_type.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/relationship_type.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/relationship_type.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/relationship_type.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/source_info.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/source_info.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/spdxid.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/spdxid.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/spdxid.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/spdxid.go diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_format_model.go similarity index 99% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_format_model.go index 62f80964..adc28f64 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_format_model.go @@ -19,7 +19,7 @@ import ( "github.com/anchore/syft/internal/spdxlicense" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/formats/common/util" + "github.com/anchore/syft/syft/format/common/util" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/sbom" "github.com/anchore/syft/syft/source" @@ -477,7 +477,7 @@ func toPackageChecksums(p pkg.Package) ([]spdx.Checksum, bool) { switch meta := p.Metadata.(type) { // we generate digest for some Java packages // spdx.github.io/spdx-spec/package-information/#710-package-checksum-field - case pkg.JavaMetadata: + case pkg.JavaArchive: // if syft has generated the digest here then filesAnalyzed is true if len(meta.ArchiveDigests) > 0 { filesAnalyzed = true @@ -489,7 +489,7 @@ func toPackageChecksums(p pkg.Package) ([]spdx.Checksum, bool) { }) } } - case pkg.GolangBinMetadata: + case pkg.GolangBinaryBuildinfoEntry: // because the H1 digest is found in the Golang metadata we cannot claim that the files were analyzed algo, hexStr, err := util.HDigestToSHA(meta.H1Digest) if err != nil { diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_syft_model.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go rename to vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_syft_model.go index f61f723c..3fb47740 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go +++ b/vendor/github.com/anchore/syft/syft/format/common/spdxhelpers/to_syft_model.go @@ -18,7 +18,7 @@ import ( "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/cpe" "github.com/anchore/syft/syft/file" - 
"github.com/anchore/syft/syft/formats/common/util" + "github.com/anchore/syft/syft/format/common/util" "github.com/anchore/syft/syft/license" "github.com/anchore/syft/syft/linux" "github.com/anchore/syft/syft/pkg" @@ -490,17 +490,15 @@ func extractPkgInfo(p *spdx.Package) pkgInfo { func toSyftPackage(p *spdx.Package) pkg.Package { info := extractPkgInfo(p) - metadataType, metadata := extractMetadata(p, info) sP := &pkg.Package{ - Type: info.typ, - Name: p.PackageName, - Version: p.PackageVersion, - Licenses: pkg.NewLicenseSet(parseSPDXLicenses(p)...), - CPEs: extractCPEs(p), - PURL: purlValue(info.purl), - Language: info.lang, - MetadataType: metadataType, - Metadata: metadata, + Type: info.typ, + Name: p.PackageName, + Version: p.PackageVersion, + Licenses: pkg.NewLicenseSet(parseSPDXLicenses(p)...), + CPEs: extractCPEs(p), + PURL: purlValue(info.purl), + Language: info.lang, + Metadata: extractMetadata(p, info), } sP.SetID() @@ -541,7 +539,7 @@ func cleanSPDXID(id string) string { } //nolint:funlen -func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface{}) { +func extractMetadata(p *spdx.Package, info pkgInfo) any { arch := info.qualifierValue(pkg.PURLQualifierArch) upstreamValue := info.qualifierValue(pkg.PURLQualifierUpstream) upstream := strings.SplitN(upstreamValue, "@", 2) @@ -560,7 +558,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface } switch info.typ { case pkg.ApkPkg: - return pkg.ApkMetadataType, pkg.ApkMetadata{ + return pkg.ApkDBEntry{ Package: p.PackageName, OriginPackage: upstreamName, Maintainer: supplier, @@ -577,7 +575,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface } else { epoch = &converted } - return pkg.RpmMetadataType, pkg.RpmMetadata{ + return pkg.RpmDBEntry{ Name: p.PackageName, Version: p.PackageVersion, Epoch: epoch, @@ -586,7 +584,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface Vendor: originator, } case pkg.DebPkg: - return pkg.DpkgMetadataType, pkg.DpkgMetadata{ + return pkg.DpkgDBEntry{ Package: p.PackageName, Source: upstreamName, Version: p.PackageVersion, @@ -599,7 +597,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface for _, value := range p.PackageChecksums { digests = append(digests, file.Digest{Algorithm: fromChecksumAlgorithm(value.Algorithm), Value: value.Value}) } - return pkg.JavaMetadataType, pkg.JavaMetadata{ + return pkg.JavaArchive{ ArchiveDigests: digests, } case pkg.GoModulePkg: @@ -613,11 +611,11 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface h1Digest = digest break } - return pkg.GolangBinMetadataType, pkg.GolangBinMetadata{ + return pkg.GolangBinaryBuildinfoEntry{ H1Digest: h1Digest, } } - return pkg.UnknownMetadataType, nil + return nil } func findPURLValue(p *spdx.Package) string { diff --git a/vendor/github.com/anchore/syft/syft/formats/common/util/h_digest.go b/vendor/github.com/anchore/syft/syft/format/common/util/h_digest.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/common/util/h_digest.go rename to vendor/github.com/anchore/syft/syft/format/common/util/h_digest.go diff --git a/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/decoder.go b/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/decoder.go new file mode 100644 index 00000000..5fba5b9a --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/decoder.go @@ -0,0 +1,125 @@ +package cyclonedxjson + 
+import ( + "encoding/json" + "fmt" + "io" + + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/format/common/cyclonedxhelpers" + "github.com/anchore/syft/syft/format/internal/cyclonedxutil" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatDecoder = (*decoder)(nil) + +type decoder struct { + decoder cyclonedxutil.Decoder +} + +func NewFormatDecoder() sbom.FormatDecoder { + return decoder{ + decoder: cyclonedxutil.NewDecoder(cyclonedx.BOMFileFormatJSON), + } +} + +func (d decoder) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + id, version := d.Identify(reader) + if id != ID { + return nil, "", "", fmt.Errorf("not a cyclonedx json document") + } + if version == "" { + return nil, "", "", fmt.Errorf("unsupported cyclonedx json document version") + } + + doc, err := d.decoder.Decode(reader) + if err != nil { + return nil, id, version, fmt.Errorf("unable to decode cyclonedx json document: %w", err) + } + + s, err := cyclonedxhelpers.ToSyftModel(doc) + if err != nil { + return nil, id, version, err + } + + return s, id, version, nil +} + +func (d decoder) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + if _, err := reader.Seek(0, io.SeekStart); err != nil { + log.Debugf("unable to seek to start of CycloneDX JSON SBOM: %+v", err) + return "", "" + } + + type Document struct { + JSONSchema string `json:"$schema"` + BOMFormat string `json:"bomFormat"` + SpecVersion string `json:"specVersion"` + } + + dec := json.NewDecoder(reader) + + var doc Document + err := dec.Decode(&doc) + if err != nil { + // maybe not json? maybe not valid? doesn't matter, we won't process it. 
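The new cyclonedxjson decoder splits cheap identification (peeking at bomFormat/specVersion) from full decoding, and Decode rewinds the reader itself via Identify. A short usage sketch, assuming the vendored package; the file name is hypothetical:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/anchore/syft/syft/format/cyclonedxjson"
)

func main() {
	f, err := os.Open("bom.cdx.json") // hypothetical CycloneDX JSON document
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := cyclonedxjson.NewFormatDecoder()

	// cheap check: only the bomFormat/specVersion fields are inspected
	if id, version := dec.Identify(f); id != "" {
		fmt.Println("identified:", id, version)
	}

	// full decode into the syft model; the reader is seeked back to the start internally
	s, id, version, err := dec.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	_ = s // *sbom.SBOM
	fmt.Println("decoded:", id, version)
}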
+ return "", "" + } + + id, version := getFormatInfo(doc.BOMFormat, doc.SpecVersion) + if version == "" || id != ID { + // not a cyclonedx json document that we support + return "", "" + } + + return id, version +} + +func getFormatInfo(bomFormat string, specVersion any) (sbom.FormatID, string) { + if bomFormat != "CycloneDX" { + // not a cyclonedx json document + return "", "" + } + + // by this point, it looks to be cyclonedx json, but we need to know the version + + var ( + version string + spec cyclonedx.SpecVersion + err error + ) + switch s := specVersion.(type) { + case string: + version = s + spec, err = cyclonedxutil.SpecVersionFromString(version) + if err != nil { + // not a supported version, but is cyclonedx json + return ID, "" + } + case cyclonedx.SpecVersion: + spec = s + version = cyclonedxutil.VersionFromSpecVersion(spec) + if version == "" { + // not a supported version, but is cyclonedx json + return ID, "" + } + default: + // bad input provided for version info + return ID, "" + } + + if spec < 0 { + // not a supported version, but is cyclonedx json + return ID, "" + } + + return ID, version +} diff --git a/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/encoder.go b/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/encoder.go new file mode 100644 index 00000000..259dc9eb --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/cyclonedxjson/encoder.go @@ -0,0 +1,52 @@ +package cyclonedxjson + +import ( + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/syft/format/internal/cyclonedxutil" + "github.com/anchore/syft/syft/sbom" +) + +const ID = cyclonedxutil.JSONFormatID + +func SupportedVersions() []string { + return cyclonedxutil.SupportedVersions(ID) +} + +type EncoderConfig struct { + Version string +} + +type encoder struct { + cfg EncoderConfig + cyclonedxutil.Encoder +} + +func NewFormatEncoderWithConfig(cfg EncoderConfig) (sbom.FormatEncoder, error) { + enc, err := cyclonedxutil.NewEncoder(cfg.Version, cyclonedx.BOMFileFormatJSON) + if err != nil { + return nil, err + } + return encoder{ + cfg: cfg, + Encoder: enc, + }, nil +} + +func DefaultEncoderConfig() EncoderConfig { + return EncoderConfig{ + Version: cyclonedxutil.DefaultVersion, + } +} + +func (e encoder) ID() sbom.FormatID { + return ID +} + +func (e encoder) Aliases() []string { + return []string{} +} + +func (e encoder) Version() string { + return e.cfg.Version +} diff --git a/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/decoder.go b/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/decoder.go new file mode 100644 index 00000000..4e9df4a6 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/decoder.go @@ -0,0 +1,106 @@ +package cyclonedxxml + +import ( + "encoding/xml" + "fmt" + "io" + "strings" + + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/format/common/cyclonedxhelpers" + "github.com/anchore/syft/syft/format/internal/cyclonedxutil" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatDecoder = (*decoder)(nil) + +type decoder struct { + decoder cyclonedxutil.Decoder +} + +func NewFormatDecoder() sbom.FormatDecoder { + return decoder{ + decoder: cyclonedxutil.NewDecoder(cyclonedx.BOMFileFormatXML), + } +} + +func (d decoder) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + + id, version := d.Identify(reader) + if id != ID { + return nil, "", 
"", fmt.Errorf("not a cyclonedx xml document") + } + if version == "" { + return nil, "", "", fmt.Errorf("unsupported cyclonedx xml document version") + } + + doc, err := d.decoder.Decode(reader) + if err != nil { + return nil, id, version, fmt.Errorf("unable to decode cyclonedx xml document: %w", err) + } + + s, err := cyclonedxhelpers.ToSyftModel(doc) + if err != nil { + return nil, id, version, err + } + + return s, id, version, nil +} + +func (d decoder) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + log.Debugf("unable to seek to start of CycloneDX XML SBOM: %+v", err) + return "", "" + } + + type Document struct { + XMLNS string `xml:"xmlns,attr"` + } + + dec := xml.NewDecoder(reader) + + var doc Document + err := dec.Decode(&doc) + if err != nil { + // maybe not xml? maybe not valid? doesn't matter, we won't process it. + return "", "" + } + + id, version := getFormatInfo(doc.XMLNS) + if version == "" || id != ID { + // not a cyclonedx xml document that we support + return "", "" + } + + return id, version +} + +func getFormatInfo(xmlns string) (sbom.FormatID, string) { + version := getVersionFromXMLNS(xmlns) + + if !strings.Contains(xmlns, "cyclonedx.org/schema/bom") { + // not a cyclonedx xml document + return "", "" + } + + spec, err := cyclonedxutil.SpecVersionFromString(version) + if spec < 0 || err != nil { + // not a supported version, but is cyclonedx xml + return ID, "" + } + return ID, version +} + +func getVersionFromXMLNS(xmlns string) string { + fields := strings.Split(xmlns, "/") + return fields[len(fields)-1] +} diff --git a/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/encoder.go b/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/encoder.go new file mode 100644 index 00000000..a27c83a0 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/cyclonedxxml/encoder.go @@ -0,0 +1,58 @@ +package cyclonedxxml + +import ( + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/syft/format/internal/cyclonedxutil" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatEncoder = (*encoder)(nil) + +const ID = cyclonedxutil.XMLFormatID + +func SupportedVersions() []string { + return cyclonedxutil.SupportedVersions(ID) +} + +type EncoderConfig struct { + Version string +} + +type encoder struct { + cfg EncoderConfig + cyclonedxutil.Encoder +} + +func NewFormatEncoderWithConfig(cfg EncoderConfig) (sbom.FormatEncoder, error) { + enc, err := cyclonedxutil.NewEncoder(cfg.Version, cyclonedx.BOMFileFormatXML) + if err != nil { + return nil, err + } + return encoder{ + cfg: cfg, + Encoder: enc, + }, nil +} + +func DefaultEncoderConfig() EncoderConfig { + return EncoderConfig{ + Version: cyclonedxutil.DefaultVersion, + } +} + +func (e encoder) ID() sbom.FormatID { + return ID +} + +func (e encoder) Aliases() []string { + return []string{ + "cyclonedx", + "cyclone", + "cdx", + } +} + +func (e encoder) Version() string { + return e.cfg.Version +} diff --git a/vendor/github.com/anchore/syft/syft/format/decoders.go b/vendor/github.com/anchore/syft/syft/format/decoders.go new file mode 100644 index 00000000..f7e0d6a2 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/decoders.go @@ -0,0 +1,91 @@ +package format + +import ( + "fmt" + "io" + + "github.com/anchore/syft/syft/format/cyclonedxjson" + "github.com/anchore/syft/syft/format/cyclonedxxml" + "github.com/anchore/syft/syft/format/spdxjson" + 
"github.com/anchore/syft/syft/format/spdxtagvalue" + "github.com/anchore/syft/syft/format/syftjson" + "github.com/anchore/syft/syft/sbom" +) + +var ( + staticDecoders sbom.FormatDecoder + _ sbom.FormatDecoder = (*DecoderCollection)(nil) +) + +func init() { + staticDecoders = NewDecoderCollection(Decoders()...) +} + +func Decoders() []sbom.FormatDecoder { + return []sbom.FormatDecoder{ + syftjson.NewFormatDecoder(), + cyclonedxxml.NewFormatDecoder(), + cyclonedxjson.NewFormatDecoder(), + spdxtagvalue.NewFormatDecoder(), + spdxjson.NewFormatDecoder(), + } +} + +type DecoderCollection struct { + decoders []sbom.FormatDecoder +} + +func NewDecoderCollection(decoders ...sbom.FormatDecoder) sbom.FormatDecoder { + return &DecoderCollection{ + decoders: decoders, + } +} + +// Decode takes a set of bytes and attempts to decode it into an SBOM relative to the decoders in the collection. +func (c *DecoderCollection) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + var bestID sbom.FormatID + for _, d := range c.decoders { + id, version := d.Identify(reader) + if id == "" || version == "" { + if id != "" { + bestID = id + } + continue + } + + return d.Decode(reader) + } + + if bestID != "" { + return nil, bestID, "", fmt.Errorf("sbom format found to be %q but the version is not supported", bestID) + } + + return nil, "", "", fmt.Errorf("sbom format not recognized") +} + +// Identify takes a set of bytes and attempts to identify the format of the SBOM relative to the decoders in the collection. +func (c *DecoderCollection) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + for _, d := range c.decoders { + id, version := d.Identify(reader) + if id != "" && version != "" { + return id, version + } + } + return "", "" +} + +// Identify takes a set of bytes and attempts to identify the format of the SBOM. +func Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + return staticDecoders.Identify(reader) +} + +// Decode takes a set of bytes and attempts to decode it into an SBOM. +func Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + return staticDecoders.Decode(reader) +} diff --git a/vendor/github.com/anchore/syft/syft/format/encoders.go b/vendor/github.com/anchore/syft/syft/format/encoders.go new file mode 100644 index 00000000..ffe7c59c --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/encoders.go @@ -0,0 +1,143 @@ +package format + +import ( + "bytes" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/scylladb/go-set/strset" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/sbom" +) + +type EncoderCollection struct { + encoders []sbom.FormatEncoder +} + +func NewEncoderCollection(encoders ...sbom.FormatEncoder) *EncoderCollection { + return &EncoderCollection{ + encoders: encoders, + } +} + +// IDs returns all format IDs represented in the collection. +func (e EncoderCollection) IDs() []sbom.FormatID { + idSet := strset.New() + for _, f := range e.encoders { + idSet.Add(string(f.ID())) + } + + idList := idSet.List() + sort.Strings(idList) + + var ids []sbom.FormatID + for _, id := range idList { + ids = append(ids, sbom.FormatID(id)) + } + + return ids +} + +// NameVersions returns all formats that are supported by the collection as a list of "name@version" strings. 
+func (e EncoderCollection) NameVersions() []string { + set := strset.New() + for _, f := range e.encoders { + if f.Version() == sbom.AnyVersion { + set.Add(string(f.ID())) + } else { + set.Add(fmt.Sprintf("%s@%s", f.ID(), f.Version())) + } + } + + list := set.List() + sort.Strings(list) + + return list +} + +// Aliases returns all format aliases represented in the collection (where an ID would be "spdx-tag-value" the alias would be "spdx"). +func (e EncoderCollection) Aliases() []string { + aliases := strset.New() + for _, f := range e.encoders { + aliases.Add(f.Aliases()...) + } + lst := aliases.List() + sort.Strings(lst) + return lst +} + +// Get returns the contained encoder for a given format name and version. +func (e EncoderCollection) Get(name string, version string) sbom.FormatEncoder { + log.WithFields("name", name, "version", version).Trace("looking for matching encoder") + + name = cleanFormatName(name) + var mostRecentFormat sbom.FormatEncoder + + for _, f := range e.encoders { + log.WithFields("name", f.ID(), "version", f.Version(), "aliases", f.Aliases()).Trace("considering format") + names := []string{string(f.ID())} + names = append(names, f.Aliases()...) + for _, n := range names { + if cleanFormatName(n) == name && versionMatches(f.Version(), version) { + if mostRecentFormat == nil || f.Version() > mostRecentFormat.Version() { + mostRecentFormat = f + } + } + } + } + + if mostRecentFormat != nil { + log.WithFields("name", mostRecentFormat.ID(), "version", mostRecentFormat.Version()).Trace("found matching encoder") + } else { + log.WithFields("search-name", name, "search-version", version).Trace("no matching encoder found") + } + + return mostRecentFormat +} + +// GetByString accepts a name@version string, such as: +// - json +// - spdx-json@2.1 +// - cdx@1.5 +func (e EncoderCollection) GetByString(s string) sbom.FormatEncoder { + parts := strings.SplitN(s, "@", 2) + version := sbom.AnyVersion + if len(parts) > 1 { + version = parts[1] + } + return e.Get(parts[0], version) +} + +func versionMatches(version string, match string) bool { + if version == sbom.AnyVersion || match == sbom.AnyVersion { + return true + } + + match = strings.ReplaceAll(match, ".", "\\.") + match = strings.ReplaceAll(match, "*", ".*") + match = fmt.Sprintf("^%s(\\..*)*$", match) + matcher, err := regexp.Compile(match) + if err != nil { + return false + } + return matcher.MatchString(version) +} + +func cleanFormatName(name string) string { + r := strings.NewReplacer("-", "", "_", "") + return strings.ToLower(r.Replace(name)) +} + +// Encode takes all SBOM elements and a format option and encodes an SBOM document. 
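Get and GetByString resolve a name (or alias) plus an optional @version, with wildcard matching and a preference for the most recent version when several encoders qualify. A sketch of looking up an encoder by its alias, assuming the vendored encoder packages:

package main

import (
	"fmt"
	"log"

	"github.com/anchore/syft/syft/format"
	"github.com/anchore/syft/syft/format/cyclonedxjson"
	"github.com/anchore/syft/syft/format/spdxtagvalue"
)

func main() {
	cdxJSON, err := cyclonedxjson.NewFormatEncoderWithConfig(cyclonedxjson.DefaultEncoderConfig())
	if err != nil {
		log.Fatal(err)
	}
	spdxTV, err := spdxtagvalue.NewFormatEncoderWithConfig(spdxtagvalue.DefaultEncoderConfig())
	if err != nil {
		log.Fatal(err)
	}

	encoders := format.NewEncoderCollection(cdxJSON, spdxTV)

	// "spdx" is an alias of spdx-tag-value; the part after "@" is the version pattern
	enc := encoders.GetByString("spdx@2.3")
	if enc == nil {
		log.Fatal("no matching encoder")
	}
	fmt.Println(enc.ID(), enc.Version())
}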
+func Encode(s sbom.SBOM, f sbom.FormatEncoder) ([]byte, error) { + buff := bytes.Buffer{} + + if err := f.Encode(&buff, s); err != nil { + return nil, fmt.Errorf("unable to encode sbom: %w", err) + } + + return buff.Bytes(), nil +} diff --git a/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/decoder.go b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/decoder.go new file mode 100644 index 00000000..735e8866 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/decoder.go @@ -0,0 +1,33 @@ +package cyclonedxutil + +import ( + "fmt" + "io" + + "github.com/CycloneDX/cyclonedx-go" +) + +type Decoder struct { + format cyclonedx.BOMFileFormat +} + +func NewDecoder(format cyclonedx.BOMFileFormat) Decoder { + return Decoder{ + format: format, + } +} + +func (d Decoder) Decode(reader io.ReadSeeker) (*cyclonedx.BOM, error) { + doc := &cyclonedx.BOM{ + Components: &[]cyclonedx.Component{}, + } + if _, err := reader.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("unable to seek to start of CycloneDX SBOM: %w", err) + } + err := cyclonedx.NewBOMDecoder(reader, d.format).Decode(doc) + if err != nil { + return nil, err + } + + return doc, nil +} diff --git a/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/encoder.go b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/encoder.go new file mode 100644 index 00000000..ed209ef1 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/encoder.go @@ -0,0 +1,37 @@ +package cyclonedxutil + +import ( + "io" + + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/syft/format/common/cyclonedxhelpers" + "github.com/anchore/syft/syft/sbom" +) + +const DefaultVersion = "1.5" + +type Encoder struct { + version cyclonedx.SpecVersion + format cyclonedx.BOMFileFormat +} + +func NewEncoder(version string, format cyclonedx.BOMFileFormat) (Encoder, error) { + specVersion, err := SpecVersionFromString(version) + if err != nil { + return Encoder{}, err + } + return Encoder{ + version: specVersion, + format: format, + }, nil +} + +func (e Encoder) Encode(writer io.Writer, s sbom.SBOM) error { + bom := cyclonedxhelpers.ToFormatModel(s) + enc := cyclonedx.NewBOMEncoder(writer, e.format) + enc.SetPretty(true) + enc.SetEscapeHTML(false) + + return enc.EncodeVersion(bom, e.version) +} diff --git a/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/versions.go b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/versions.go new file mode 100644 index 00000000..426da2c6 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/internal/cyclonedxutil/versions.go @@ -0,0 +1,66 @@ +package cyclonedxutil + +import ( + "fmt" + + "github.com/CycloneDX/cyclonedx-go" + + "github.com/anchore/syft/syft/sbom" +) + +const ( + XMLFormatID sbom.FormatID = "cyclonedx-xml" + JSONFormatID sbom.FormatID = "cyclonedx-json" +) + +func SupportedVersions(id sbom.FormatID) []string { + versions := []string{ + "1.2", + "1.3", + "1.4", + "1.5", + } + + if id != JSONFormatID { + // JSON format not supported for version < 1.2 + versions = append([]string{"1.0", "1.1"}, versions...) 
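Encode above is a thin buffer-and-return wrapper around an encoder's Encode method; combined with the decoder collection it yields a small format-conversion pipeline. A sketch converting an arbitrary SBOM to SPDX JSON, assuming the vendored packages; the input file name is made up:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/anchore/syft/syft/format"
	"github.com/anchore/syft/syft/format/spdxjson"
)

func main() {
	in, err := os.Open("input-sbom.json") // hypothetical input SBOM in any supported format
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	s, _, _, err := format.Decode(in)
	if err != nil {
		log.Fatal(err)
	}

	enc, err := spdxjson.NewFormatEncoderWithConfig(spdxjson.DefaultEncoderConfig())
	if err != nil {
		log.Fatal(err)
	}

	out, err := format.Encode(*s, enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out), "bytes of spdx-json")
}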
+ } + + return versions +} + +func SpecVersionFromString(v string) (cyclonedx.SpecVersion, error) { + switch v { + case "1.0": + return cyclonedx.SpecVersion1_0, nil + case "1.1": + return cyclonedx.SpecVersion1_1, nil + case "1.2": + return cyclonedx.SpecVersion1_2, nil + case "1.3": + return cyclonedx.SpecVersion1_3, nil + case "1.4": + return cyclonedx.SpecVersion1_4, nil + case "1.5": + return cyclonedx.SpecVersion1_5, nil + } + return -1, fmt.Errorf("unsupported CycloneDX version %q", v) +} + +func VersionFromSpecVersion(spec cyclonedx.SpecVersion) string { + switch spec { + case cyclonedx.SpecVersion1_0: + return "1.0" + case cyclonedx.SpecVersion1_1: + return "1.1" + case cyclonedx.SpecVersion1_2: + return "1.2" + case cyclonedx.SpecVersion1_3: + return "1.3" + case cyclonedx.SpecVersion1_4: + return "1.4" + case cyclonedx.SpecVersion1_5: + return "1.5" + } + return "" +} diff --git a/vendor/github.com/anchore/syft/syft/format/internal/spdxutil/versions.go b/vendor/github.com/anchore/syft/syft/format/internal/spdxutil/versions.go new file mode 100644 index 00000000..5718e5d2 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/internal/spdxutil/versions.go @@ -0,0 +1,26 @@ +package spdxutil + +import ( + "github.com/anchore/syft/syft/sbom" +) + +const DefaultVersion = "2.3" + +const ( + JSONFormatID sbom.FormatID = "spdx-json" + TagValueFormatID sbom.FormatID = "spdx-tag-value" +) + +func SupportedVersions(id sbom.FormatID) []string { + versions := []string{ + "2.2", + "2.3", + } + + if id != JSONFormatID { + // JSON format is not supported in v2.1 + return append([]string{"2.1"}, versions...) + } + + return versions +} diff --git a/vendor/github.com/anchore/syft/syft/format/spdxjson/decoder.go b/vendor/github.com/anchore/syft/syft/format/spdxjson/decoder.go new file mode 100644 index 00000000..f9484dcc --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/spdxjson/decoder.go @@ -0,0 +1,104 @@ +package spdxjson + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + spdxJson "github.com/spdx/tools-golang/json" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/format/common/spdxhelpers" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatDecoder = (*decoder)(nil) + +type decoder struct { +} + +func NewFormatDecoder() sbom.FormatDecoder { + return decoder{} +} + +func (d decoder) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + + // since spdx lib will always return the latest version of the document, we need to identify the version + // first and then decode into the appropriate document object. Otherwise if we get the version info from the + // decoded object we will always get the latest version (instead of the version we decoded from). 
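The two versions.go files above define the supported-version matrix (CycloneDX JSON starts at 1.2 while XML also covers 1.0/1.1; SPDX JSON starts at 2.2 while tag-value also covers 2.1). The per-format packages expose that matrix publicly, as sketched here:

package main

import (
	"fmt"

	"github.com/anchore/syft/syft/format/cyclonedxjson"
	"github.com/anchore/syft/syft/format/cyclonedxxml"
	"github.com/anchore/syft/syft/format/spdxjson"
	"github.com/anchore/syft/syft/format/spdxtagvalue"
)

func main() {
	fmt.Println("cyclonedx-json:", cyclonedxjson.SupportedVersions())
	fmt.Println("cyclonedx-xml: ", cyclonedxxml.SupportedVersions())
	fmt.Println("spdx-json:     ", spdxjson.SupportedVersions())
	fmt.Println("spdx-tag-value:", spdxtagvalue.SupportedVersions())
}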
+ id, version := d.Identify(reader) + if id != ID { + return nil, "", "", fmt.Errorf("not a spdx json document") + } + if version == "" { + return nil, "", "", fmt.Errorf("unsupported spdx json document version") + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + return nil, "", "", fmt.Errorf("unable to seek to start of SPDX JSON SBOM: %+v", err) + } + + doc, err := spdxJson.Read(reader) + if err != nil { + return nil, id, version, fmt.Errorf("unable to decode spdx json: %w", err) + } + + s, err := spdxhelpers.ToSyftModel(doc) + if err != nil { + return nil, id, version, err + } + return s, id, version, nil +} + +func (d decoder) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + log.Debugf("unable to seek to start of SPDX JSON SBOM: %+v", err) + return "", "" + } + + // Example JSON document + // { + // "spdxVersion": "SPDX-2.3", + // ... + type Document struct { + SPDXVersion string `json:"spdxVersion"` + } + + dec := json.NewDecoder(reader) + + var doc Document + err := dec.Decode(&doc) + if err != nil { + // maybe not json? maybe not valid? doesn't matter, we won't process it. + return "", "" + } + + id, version := getFormatInfo(doc.SPDXVersion) + if version == "" || id != ID { + // not a spdx json document that we support + return "", "" + } + + return id, version +} + +func getFormatInfo(spdxVersion string) (sbom.FormatID, string) { + // example input: SPDX-2.3 + if !strings.HasPrefix(strings.ToLower(spdxVersion), "spdx-") { + return "", "" + } + fields := strings.Split(spdxVersion, "-") + if len(fields) != 2 { + return ID, "" + } + + return ID, fields[1] +} diff --git a/vendor/github.com/anchore/syft/syft/format/spdxjson/encoder.go b/vendor/github.com/anchore/syft/syft/format/spdxjson/encoder.go new file mode 100644 index 00000000..e8bb68b9 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/spdxjson/encoder.go @@ -0,0 +1,92 @@ +package spdxjson + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/spdx/tools-golang/convert" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" + + "github.com/anchore/syft/syft/format/common/spdxhelpers" + "github.com/anchore/syft/syft/format/internal/spdxutil" + "github.com/anchore/syft/syft/sbom" +) + +const ID = spdxutil.JSONFormatID + +func SupportedVersions() []string { + return spdxutil.SupportedVersions(ID) +} + +type EncoderConfig struct { + Version string +} + +type encoder struct { + cfg EncoderConfig +} + +func NewFormatEncoderWithConfig(cfg EncoderConfig) (sbom.FormatEncoder, error) { + return encoder{ + cfg: cfg, + }, nil +} + +func DefaultEncoderConfig() EncoderConfig { + return EncoderConfig{ + Version: spdxutil.DefaultVersion, + } +} + +func (e encoder) ID() sbom.FormatID { + return ID +} + +func (e encoder) Aliases() []string { + return []string{} +} + +func (e encoder) Version() string { + return e.cfg.Version +} + +func (e encoder) Encode(writer io.Writer, s sbom.SBOM) error { + latestDoc := spdxhelpers.ToFormatModel(s) + if latestDoc == nil { + return fmt.Errorf("unable to convert SBOM to SPDX document") + } + + var err error + var encodeDoc any + switch e.cfg.Version { + case "2.1": + doc := v2_1.Document{} + err = convert.Document(latestDoc, &doc) + encodeDoc = doc + case "2.2": + doc := v2_2.Document{} + err = convert.Document(latestDoc, &doc) + encodeDoc = doc + + case "2.3": + doc := v2_3.Document{} + 
err = convert.Document(latestDoc, &doc) + encodeDoc = doc + default: + return fmt.Errorf("unsupported SPDX version %q", e.cfg.Version) + } + + if err != nil { + return fmt.Errorf("unable to convert SBOM to SPDX document: %w", err) + } + + enc := json.NewEncoder(writer) + // prevent > and < from being escaped in the payload + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + + return enc.Encode(encodeDoc) +} diff --git a/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/decoder.go b/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/decoder.go new file mode 100644 index 00000000..4f70e382 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/decoder.go @@ -0,0 +1,109 @@ +package spdxtagvalue + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/spdx/tools-golang/tagvalue" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/format/common/spdxhelpers" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatDecoder = (*decoder)(nil) + +type decoder struct { +} + +func NewFormatDecoder() sbom.FormatDecoder { + return decoder{} +} + +func (d decoder) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + + // since spdx lib will always return the latest version of the document, we need to identify the version + // first and then decode into the appropriate document object. Otherwise if we get the version info from the + // decoded object we will always get the latest version (instead of the version we decoded from). + id, version := d.Identify(reader) + if id != ID { + return nil, "", "", fmt.Errorf("not a spdx tag-value document") + } + if version == "" { + return nil, "", "", fmt.Errorf("unsupported spdx tag-value document version") + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + return nil, "", "", fmt.Errorf("unable to seek to start of SPDX Tag-Value SBOM: %+v", err) + } + + doc, err := tagvalue.Read(reader) + if err != nil { + return nil, id, version, fmt.Errorf("unable to decode spdx tag-value: %w", err) + } + + s, err := spdxhelpers.ToSyftModel(doc) + if err != nil { + return nil, id, version, err + } + return s, id, version, nil +} + +func (d decoder) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + log.Debugf("unable to seek to start of SPDX Tag-Value SBOM: %+v", err) + return "", "" + } + + // Example document + // SPDXVersion: SPDX-2.3 + // DataLicense: CC0-1.0 + // SPDXID: SPDXRef-DOCUMENT + + scanner := bufio.NewScanner(reader) + scanner.Split(bufio.ScanLines) + + var id sbom.FormatID + var version string + for i := 0; scanner.Scan() && i < 3; i++ { + line := scanner.Text() + if strings.HasPrefix(line, "SPDXVersion:") { + id, version = getFormatInfo(line) + break + } + } + + if version == "" || id != ID { + // not a spdx tag-value document + return "", "" + } + + return id, version +} + +func getFormatInfo(line string) (sbom.FormatID, string) { + // example input: SPDXVersion: SPDX-2.3 + fields := strings.SplitN(line, ":", 2) + if len(fields) != 2 { + return "", "" + } + spdxVersion := fields[1] + if !strings.HasPrefix(strings.TrimSpace(strings.ToLower(spdxVersion)), "spdx-") { + return "", "" + } + fields = strings.Split(spdxVersion, "-") + if len(fields) != 2 { + return ID, "" + } + + return ID, fields[1] +} diff --git 
a/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/encoder.go b/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/encoder.go new file mode 100644 index 00000000..7923801e --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/spdxtagvalue/encoder.go @@ -0,0 +1,90 @@ +package spdxtagvalue + +import ( + "fmt" + "io" + + "github.com/spdx/tools-golang/convert" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" + "github.com/spdx/tools-golang/tagvalue" + + "github.com/anchore/syft/syft/format/common/spdxhelpers" + "github.com/anchore/syft/syft/format/internal/spdxutil" + "github.com/anchore/syft/syft/sbom" +) + +const ID = spdxutil.TagValueFormatID + +func SupportedVersions() []string { + return spdxutil.SupportedVersions(ID) +} + +type EncoderConfig struct { + Version string +} + +type encoder struct { + cfg EncoderConfig +} + +func NewFormatEncoderWithConfig(cfg EncoderConfig) (sbom.FormatEncoder, error) { + return encoder{ + cfg: cfg, + }, nil +} + +func DefaultEncoderConfig() EncoderConfig { + return EncoderConfig{ + Version: spdxutil.DefaultVersion, + } +} + +func (e encoder) ID() sbom.FormatID { + return ID +} + +func (e encoder) Aliases() []string { + return []string{ + "spdx", + "spdx-tv", + } +} + +func (e encoder) Version() string { + return e.cfg.Version +} + +func (e encoder) Encode(writer io.Writer, s sbom.SBOM) error { + latestDoc := spdxhelpers.ToFormatModel(s) + if latestDoc == nil { + return fmt.Errorf("unable to convert SBOM to SPDX document") + } + + var err error + var encodeDoc any + switch e.cfg.Version { + case "2.1": + doc := v2_1.Document{} + err = convert.Document(latestDoc, &doc) + encodeDoc = doc + case "2.2": + doc := v2_2.Document{} + err = convert.Document(latestDoc, &doc) + encodeDoc = doc + + case "2.3", "", "2", "2.x": + doc := v2_3.Document{} + err = convert.Document(latestDoc, &doc) + encodeDoc = doc + default: + return fmt.Errorf("unsupported SPDX version %q", e.cfg.Version) + } + + if err != nil { + return fmt.Errorf("unable to convert SBOM to SPDX document: %w", err) + } + + return tagvalue.Write(encodeDoc, writer) +} diff --git a/vendor/github.com/anchore/syft/syft/format/syftjson/decoder.go b/vendor/github.com/anchore/syft/syft/format/syftjson/decoder.go new file mode 100644 index 00000000..22cb8645 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/syftjson/decoder.go @@ -0,0 +1,102 @@ +package syftjson + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/Masterminds/semver" + + "github.com/anchore/syft/internal" + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/format/syftjson/model" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatDecoder = (*decoder)(nil) + +type decoder struct{} + +func NewFormatDecoder() sbom.FormatDecoder { + return decoder{} +} + +func (d decoder) Decode(reader io.ReadSeeker) (*sbom.SBOM, sbom.FormatID, string, error) { + if reader == nil { + return nil, "", "", fmt.Errorf("no SBOM bytes provided") + } + + id, version := d.Identify(reader) + if version == "" || id != ID { + return nil, "", "", fmt.Errorf("not a syft-json document") + } + var doc model.Document + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + return nil, "", "", fmt.Errorf("unable to seek to start of Syft JSON SBOM: %+v", err) + } + + dec := json.NewDecoder(reader) + + err := dec.Decode(&doc) + if err != nil { + return nil, "", "", fmt.Errorf("unable to decode syft-json document: 
%w", err) + } + + if err := checkSupportedSchema(doc.Schema.Version, internal.JSONSchemaVersion); err != nil { + log.Warn(err) + } + + return toSyftModel(doc), ID, doc.Schema.Version, nil +} + +func (d decoder) Identify(reader io.ReadSeeker) (sbom.FormatID, string) { + if reader == nil { + return "", "" + } + + if _, err := reader.Seek(0, io.SeekStart); err != nil { + log.Debugf("unable to seek to start of Syft JSON SBOM: %+v", err) + return "", "" + } + + type Document struct { + Schema model.Schema `json:"schema"` + } + + dec := json.NewDecoder(reader) + + var doc Document + err := dec.Decode(&doc) + if err != nil { + // maybe not json? maybe not valid? doesn't matter, we won't process it. + return "", "" + } + + if !strings.Contains(doc.Schema.URL, "anchore/syft") { + // not a syft-json document + return "", "" + } + + // note: we support all previous schema versions + return ID, doc.Schema.Version +} + +func checkSupportedSchema(documentVersion string, parserVersion string) error { + documentV, err := semver.NewVersion(documentVersion) + if err != nil { + return fmt.Errorf("error comparing document schema version with parser schema version: %w", err) + } + + parserV, err := semver.NewVersion(parserVersion) + if err != nil { + return fmt.Errorf("error comparing document schema version with parser schema version: %w", err) + } + + if documentV.GreaterThan(parserV) { + return fmt.Errorf("document has schema version %s, but parser has older schema version (%s)", documentVersion, parserVersion) + } + + return nil +} diff --git a/vendor/github.com/anchore/syft/syft/format/syftjson/encoder.go b/vendor/github.com/anchore/syft/syft/format/syftjson/encoder.go new file mode 100644 index 00000000..c85166da --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/format/syftjson/encoder.go @@ -0,0 +1,67 @@ +package syftjson + +import ( + "encoding/json" + "io" + + "github.com/anchore/syft/internal" + "github.com/anchore/syft/syft/sbom" +) + +var _ sbom.FormatEncoder = (*encoder)(nil) + +const ID sbom.FormatID = "syft-json" + +type EncoderConfig struct { + Legacy bool // transform the output to the legacy syft-json format (pre v1.0 changes, enumerated in the README.md) +} + +type encoder struct { + cfg EncoderConfig +} + +func NewFormatEncoder() sbom.FormatEncoder { + enc, err := NewFormatEncoderWithConfig(DefaultEncoderConfig()) + if err != nil { + panic(err) + } + return enc +} + +func NewFormatEncoderWithConfig(cfg EncoderConfig) (sbom.FormatEncoder, error) { + return encoder{ + cfg: cfg, + }, nil +} + +func DefaultEncoderConfig() EncoderConfig { + return EncoderConfig{ + Legacy: false, + } +} + +func (e encoder) ID() sbom.FormatID { + return ID +} + +func (e encoder) Aliases() []string { + return []string{ + "json", + "syft", + } +} + +func (e encoder) Version() string { + return internal.JSONSchemaVersion +} + +func (e encoder) Encode(writer io.Writer, s sbom.SBOM) error { + doc := ToFormatModel(s, e.cfg) + + enc := json.NewEncoder(writer) + // prevent > and < from being escaped in the payload + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + + return enc.Encode(&doc) +} diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/document.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/document.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/document.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/document.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/file.go 
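For reference, a minimal sketch of how the decoder/encoder API introduced above fits together: Identify sniffs the format ID and spec version from the reader, Decode returns an sbom.SBOM along with the detected ID and version, and encoders are constructed from an EncoderConfig (or the package default). Only constructors and signatures visible in this diff are used; the input path and program scaffolding are illustrative assumptions, not part of the vendored change.

package main

import (
	"fmt"
	"os"

	"github.com/anchore/syft/syft/format/spdxtagvalue"
	"github.com/anchore/syft/syft/format/syftjson"
)

func main() {
	// open an existing SPDX tag-value SBOM (hypothetical input path)
	f, err := os.Open("sbom.spdx")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Decode reports the SBOM along with the format ID and spec version it detected
	s, id, version, err := spdxtagvalue.NewFormatDecoder().Decode(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %s (version %s)\n", id, version)

	// re-encode the same SBOM as syft-json using the default encoder configuration
	if err := syftjson.NewFormatEncoder().Encode(os.Stdout, *s); err != nil {
		panic(err)
	}
}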
b/vendor/github.com/anchore/syft/syft/format/syftjson/model/file.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/file.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/file.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/linux_release.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/linux_release.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/linux_release.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/linux_release.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/package.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/package.go similarity index 66% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/package.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/package.go index d4a819f2..f655ad12 100644 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/package.go +++ b/vendor/github.com/anchore/syft/syft/format/syftjson/model/package.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" "reflect" + "strings" "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/file" + "github.com/anchore/syft/syft/internal/packagemetadata" "github.com/anchore/syft/syft/license" "github.com/anchore/syft/syft/pkg" ) @@ -60,28 +62,28 @@ func newModelLicensesFromValues(licenses []string) (ml []License) { } func (f *licenses) UnmarshalJSON(b []byte) error { - var licenses []License - if err := json.Unmarshal(b, &licenses); err != nil { + var lics []License + if err := json.Unmarshal(b, &lics); err != nil { var simpleLicense []string if err := json.Unmarshal(b, &simpleLicense); err != nil { return fmt.Errorf("unable to unmarshal license: %w", err) } - licenses = newModelLicensesFromValues(simpleLicense) + lics = newModelLicensesFromValues(simpleLicense) } - *f = licenses + *f = lics return nil } // PackageCustomData contains ambiguous values (type-wise) from pkg.Package. type PackageCustomData struct { - MetadataType pkg.MetadataType `json:"metadataType,omitempty"` - Metadata interface{} `json:"metadata,omitempty"` + MetadataType string `json:"metadataType,omitempty"` + Metadata any `json:"metadata,omitempty"` } // packageMetadataUnpacker is all values needed from Package to disambiguate ambiguous fields during json unmarshaling. 
type packageMetadataUnpacker struct { - MetadataType pkg.MetadataType `json:"metadataType"` - Metadata json.RawMessage `json:"metadata"` + MetadataType string `json:"metadataType"` + Metadata json.RawMessage `json:"metadata"` } func (p *packageMetadataUnpacker) String() string { @@ -112,32 +114,64 @@ func (p *Package) UnmarshalJSON(b []byte) error { } func unpackPkgMetadata(p *Package, unpacker packageMetadataUnpacker) error { - p.MetadataType = pkg.CleanMetadataType(unpacker.MetadataType) + if unpacker.MetadataType == "" { + return nil + } + + // check for legacy correction cases from schema v11 -> v12 + ty := unpacker.MetadataType + switch unpacker.MetadataType { + case "HackageMetadataType": + for _, l := range p.Locations { + if strings.HasSuffix(l.RealPath, ".yaml.lock") { + ty = "haskell-hackage-stack-lock-entry" + break + } else if strings.HasSuffix(l.RealPath, ".yaml") { + ty = "haskell-hackage-stack-entry" + break + } + } + case "RpmMetadata": + for _, l := range p.Locations { + if strings.HasSuffix(l.RealPath, ".rpm") { + ty = "rpm-archive" + break + } + } + case "RustCargoPackageMetadata": + var found bool + for _, l := range p.Locations { + if strings.HasSuffix(strings.ToLower(l.RealPath), "cargo.lock") { + ty = "rust-cargo-lock-entry" + found = true + break + } + } + if !found { + ty = "rust-cargo-audit-entry" + } + } - typ, ok := pkg.MetadataTypeByName[p.MetadataType] - if ok { - val := reflect.New(typ).Interface() + typ := packagemetadata.ReflectTypeFromJSONName(ty) + if typ == nil { + // capture unknown metadata as a generic struct if len(unpacker.Metadata) > 0 { - if err := json.Unmarshal(unpacker.Metadata, val); err != nil { + var val interface{} + if err := json.Unmarshal(unpacker.Metadata, &val); err != nil { return err } + p.Metadata = val } - p.Metadata = reflect.ValueOf(val).Elem().Interface() - return nil + + return errUnknownMetadataType } - // capture unknown metadata as a generic struct + val := reflect.New(typ).Interface() if len(unpacker.Metadata) > 0 { - var val interface{} - if err := json.Unmarshal(unpacker.Metadata, &val); err != nil { + if err := json.Unmarshal(unpacker.Metadata, val); err != nil { return err } - p.Metadata = val } - - if p.MetadataType != "" { - return errUnknownMetadataType - } - + p.Metadata = reflect.ValueOf(val).Elem().Interface() return nil } diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/relationship.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/relationship.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/relationship.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/relationship.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/secrets.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/secrets.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/secrets.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/secrets.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/model/source.go b/vendor/github.com/anchore/syft/syft/format/syftjson/model/source.go similarity index 100% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/model/source.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/model/source.go diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/to_format_model.go b/vendor/github.com/anchore/syft/syft/format/syftjson/to_format_model.go similarity index 90% rename 
from vendor/github.com/anchore/syft/syft/formats/syftjson/to_format_model.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/to_format_model.go index 097a6461..b07d0502 100644 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/to_format_model.go +++ b/vendor/github.com/anchore/syft/syft/format/syftjson/to_format_model.go @@ -11,7 +11,8 @@ import ( "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/cpe" "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/formats/syftjson/model" + "github.com/anchore/syft/syft/format/syftjson/model" + "github.com/anchore/syft/syft/internal/packagemetadata" "github.com/anchore/syft/syft/internal/sourcemetadata" "github.com/anchore/syft/syft/linux" "github.com/anchore/syft/syft/pkg" @@ -19,10 +20,22 @@ import ( "github.com/anchore/syft/syft/source" ) +// MetadataType infers the metadata type value based on the pkg.Metadata payload. +func MetadataType(metadata interface{}) string { + return metadataType(metadata, false) +} + +func metadataType(metadata interface{}, legacy bool) string { + if legacy { + return packagemetadata.JSONLegacyName(metadata) + } + return packagemetadata.JSONName(metadata) +} + // ToFormatModel transforms the sbom into a format-specific model. -func ToFormatModel(s sbom.SBOM) model.Document { +func ToFormatModel(s sbom.SBOM, cfg EncoderConfig) model.Document { return model.Document{ - Artifacts: toPackageModels(s.Artifacts.Packages), + Artifacts: toPackageModels(s.Artifacts.Packages, cfg), ArtifactRelationships: toRelationshipModel(s.Relationships), Files: toFile(s), Secrets: toSecrets(s.Artifacts.Secrets), @@ -196,13 +209,13 @@ func toFileType(ty stereoscopeFile.Type) string { } } -func toPackageModels(catalog *pkg.Collection) []model.Package { +func toPackageModels(catalog *pkg.Collection, cfg EncoderConfig) []model.Package { artifacts := make([]model.Package, 0) if catalog == nil { return artifacts } for _, p := range catalog.Sorted() { - artifacts = append(artifacts, toPackageModel(p)) + artifacts = append(artifacts, toPackageModel(p, cfg)) } return artifacts } @@ -233,7 +246,7 @@ func toLicenseModel(pkgLicenses []pkg.License) (modelLicenses []model.License) { } // toPackageModel creates a new Package from the given pkg.Package.
-func toPackageModel(p pkg.Package) model.Package { +func toPackageModel(p pkg.Package, cfg EncoderConfig) model.Package { var cpes = make([]string, len(p.CPEs)) for i, c := range p.CPEs { cpes[i] = cpe.String(c) @@ -260,7 +273,7 @@ func toPackageModel(p pkg.Package) model.Package { PURL: p.PURL, }, PackageCustomData: model.PackageCustomData{ - MetadataType: p.MetadataType, + MetadataType: metadataType(p.Metadata, cfg.Legacy), Metadata: p.Metadata, }, } diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/to_syft_model.go b/vendor/github.com/anchore/syft/syft/format/syftjson/to_syft_model.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/formats/syftjson/to_syft_model.go rename to vendor/github.com/anchore/syft/syft/format/syftjson/to_syft_model.go index 3d9c48d7..8807688e 100644 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/to_syft_model.go +++ b/vendor/github.com/anchore/syft/syft/format/syftjson/to_syft_model.go @@ -14,14 +14,14 @@ import ( "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/cpe" "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/formats/syftjson/model" + "github.com/anchore/syft/syft/format/syftjson/model" "github.com/anchore/syft/syft/linux" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/sbom" "github.com/anchore/syft/syft/source" ) -func toSyftModel(doc model.Document) (*sbom.SBOM, error) { +func toSyftModel(doc model.Document) *sbom.SBOM { idAliases := make(map[string]string) catalog := toSyftCatalog(doc.Artifacts, idAliases) @@ -40,7 +40,7 @@ func toSyftModel(doc model.Document) (*sbom.SBOM, error) { Source: *toSyftSourceData(doc.Source), Descriptor: toSyftDescriptor(doc.Descriptor), Relationships: warnConversionErrors(toSyftRelationships(&doc, catalog, doc.ArtifactRelationships, idAliases)), - }, nil + } } func warnConversionErrors[T any](converted []T, errors []error) []T { @@ -311,17 +311,16 @@ func toSyftPackage(p model.Package, idAliases map[string]string) pkg.Package { } out := pkg.Package{ - Name: p.Name, - Version: p.Version, - FoundBy: p.FoundBy, - Locations: file.NewLocationSet(p.Locations...), - Licenses: pkg.NewLicenseSet(toSyftLicenses(p.Licenses)...), - Language: p.Language, - Type: p.Type, - CPEs: cpes, - PURL: p.PURL, - MetadataType: p.MetadataType, - Metadata: p.Metadata, + Name: p.Name, + Version: p.Version, + FoundBy: p.FoundBy, + Locations: file.NewLocationSet(p.Locations...), + Licenses: pkg.NewLicenseSet(toSyftLicenses(p.Licenses)...), + Language: p.Language, + Type: p.Type, + CPEs: cpes, + PURL: p.PURL, + Metadata: p.Metadata, } // we don't know if this package ID is truly unique, however, we need to trust the user input in case there are diff --git a/vendor/github.com/anchore/syft/syft/formats.go b/vendor/github.com/anchore/syft/syft/formats.go deleted file mode 100644 index 71dc8ebf..00000000 --- a/vendor/github.com/anchore/syft/syft/formats.go +++ /dev/null @@ -1,49 +0,0 @@ -package syft - -import ( - "github.com/anchore/syft/syft/formats" - "github.com/anchore/syft/syft/formats/cyclonedxjson" - "github.com/anchore/syft/syft/formats/cyclonedxxml" - "github.com/anchore/syft/syft/formats/github" - "github.com/anchore/syft/syft/formats/spdxjson" - "github.com/anchore/syft/syft/formats/spdxtagvalue" - "github.com/anchore/syft/syft/formats/syftjson" - "github.com/anchore/syft/syft/formats/table" - "github.com/anchore/syft/syft/formats/template" - "github.com/anchore/syft/syft/formats/text" - "github.com/anchore/syft/syft/sbom" -) - -// these 
have been exported for the benefit of API users -// TODO: deprecated: now that the formats package has been moved to syft/formats, will be removed in v1.0.0 -const ( - JSONFormatID = syftjson.ID - TextFormatID = text.ID - TableFormatID = table.ID - CycloneDxXMLFormatID = cyclonedxxml.ID - CycloneDxJSONFormatID = cyclonedxjson.ID - GitHubFormatID = github.ID - SPDXTagValueFormatID = spdxtagvalue.ID - SPDXJSONFormatID = spdxjson.ID - TemplateFormatID = template.ID -) - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func FormatIDs() (ids []sbom.FormatID) { - return formats.AllIDs() -} - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func FormatByID(id sbom.FormatID) sbom.Format { - return formats.ByNameAndVersion(string(id), "") -} - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func FormatByName(name string) sbom.Format { - return formats.ByName(name) -} - -// TODO: deprecated, moved to syft/formats/formats.go. will be removed in v1.0.0 -func IdentifyFormat(by []byte) sbom.Format { - return formats.Identify(by) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/encoder.go b/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/encoder.go deleted file mode 100644 index 297b8026..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/encoder.go +++ /dev/null @@ -1,48 +0,0 @@ -package cyclonedxjson - -import ( - "io" - - "github.com/CycloneDX/cyclonedx-go" - - "github.com/anchore/syft/syft/formats/common/cyclonedxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func encoderV1_0(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_0) -} - -func encoderV1_1(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_1) -} - -func encoderV1_2(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_2) -} - -func encoderV1_3(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_3) -} - -func encoderV1_4(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_4) -} - -func encoderV1_5(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_5) -} - -func buildEncoder(output io.Writer, s sbom.SBOM) (cyclonedx.BOMEncoder, *cyclonedx.BOM) { - bom := cyclonedxhelpers.ToFormatModel(s) - enc := cyclonedx.NewBOMEncoder(output, cyclonedx.BOMFileFormatJSON) - enc.SetPretty(true) - enc.SetEscapeHTML(false) - return enc, bom -} diff --git a/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/format.go b/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/format.go deleted file mode 100644 index 97a088aa..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/cyclonedxjson/format.go +++ /dev/null @@ -1,72 +0,0 @@ -package cyclonedxjson - -import ( - "github.com/CycloneDX/cyclonedx-go" - - "github.com/anchore/syft/syft/formats/common/cyclonedxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "cyclonedx-json" - -var Format = Format1_4 - -func Format1_0() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_0.String(), - encoderV1_0, - 
cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} - -func Format1_1() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_1.String(), - encoderV1_1, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} - -func Format1_2() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_2.String(), - encoderV1_2, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} - -func Format1_3() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_3.String(), - encoderV1_3, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} - -func Format1_4() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_4.String(), - encoderV1_4, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} - -func Format1_5() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_5.String(), - encoderV1_5, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatJSON), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatJSON), - ID, - ) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/encoder.go b/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/encoder.go deleted file mode 100644 index 3941feca..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/encoder.go +++ /dev/null @@ -1,48 +0,0 @@ -package cyclonedxxml - -import ( - "io" - - "github.com/CycloneDX/cyclonedx-go" - - "github.com/anchore/syft/syft/formats/common/cyclonedxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func encoderV1_0(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_0) -} - -func encoderV1_1(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_1) -} - -func encoderV1_2(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_2) -} - -func encoderV1_3(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_3) -} - -func encoderV1_4(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_4) -} - -func encoderV1_5(output io.Writer, s sbom.SBOM) error { - enc, bom := buildEncoder(output, s) - return enc.EncodeVersion(bom, cyclonedx.SpecVersion1_5) -} - -func buildEncoder(output io.Writer, s sbom.SBOM) (cyclonedx.BOMEncoder, *cyclonedx.BOM) { - bom := cyclonedxhelpers.ToFormatModel(s) - enc := cyclonedx.NewBOMEncoder(output, cyclonedx.BOMFileFormatXML) - enc.SetPretty(true) - enc.SetEscapeHTML(false) - return enc, bom -} diff --git a/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/format.go b/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/format.go deleted file mode 100644 index 1b22cee1..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/cyclonedxxml/format.go +++ /dev/null @@ -1,72 +0,0 @@ -package cyclonedxxml - -import ( - "github.com/CycloneDX/cyclonedx-go" - - "github.com/anchore/syft/syft/formats/common/cyclonedxhelpers" - 
"github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "cyclonedx-xml" - -var Format = Format1_4 - -func Format1_0() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_0.String(), - encoderV1_0, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} - -func Format1_1() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_1.String(), - encoderV1_1, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} - -func Format1_2() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_2.String(), - encoderV1_2, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} - -func Format1_3() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_3.String(), - encoderV1_3, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} - -func Format1_4() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_4.String(), - encoderV1_4, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} - -func Format1_5() sbom.Format { - return sbom.NewFormat( - cyclonedx.SpecVersion1_5.String(), - encoderV1_5, - cyclonedxhelpers.GetDecoder(cyclonedx.BOMFileFormatXML), - cyclonedxhelpers.GetValidator(cyclonedx.BOMFileFormatXML), - ID, "cyclonedx", "cyclone", - ) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/formats.go b/vendor/github.com/anchore/syft/syft/formats/formats.go deleted file mode 100644 index e18b1a85..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/formats.go +++ /dev/null @@ -1,157 +0,0 @@ -package formats - -import ( - "bytes" - "errors" - "fmt" - "io" - "regexp" - "slices" - "strings" - - "github.com/anchore/syft/internal/log" - "github.com/anchore/syft/syft/formats/cyclonedxjson" - "github.com/anchore/syft/syft/formats/cyclonedxxml" - "github.com/anchore/syft/syft/formats/github" - "github.com/anchore/syft/syft/formats/spdxjson" - "github.com/anchore/syft/syft/formats/spdxtagvalue" - "github.com/anchore/syft/syft/formats/syftjson" - "github.com/anchore/syft/syft/formats/table" - "github.com/anchore/syft/syft/formats/template" - "github.com/anchore/syft/syft/formats/text" - "github.com/anchore/syft/syft/sbom" -) - -func Formats() []sbom.Format { - return []sbom.Format{ - syftjson.Format(), - github.Format(), - table.Format(), - text.Format(), - template.Format(), - cyclonedxxml.Format1_0(), - cyclonedxxml.Format1_1(), - cyclonedxxml.Format1_2(), - cyclonedxxml.Format1_3(), - cyclonedxxml.Format1_4(), - cyclonedxxml.Format1_5(), - cyclonedxjson.Format1_0(), - cyclonedxjson.Format1_1(), - cyclonedxjson.Format1_2(), - cyclonedxjson.Format1_3(), - cyclonedxjson.Format1_4(), - cyclonedxjson.Format1_5(), - spdxtagvalue.Format2_1(), - spdxtagvalue.Format2_2(), - spdxtagvalue.Format2_3(), - spdxjson.Format2_2(), - spdxjson.Format2_3(), - } -} - -func Identify(by []byte) sbom.Format { - for _, f := range Formats() { - if err := f.Validate(bytes.NewReader(by)); err != nil { - if !errors.Is(err, sbom.ErrValidationNotSupported) { - log.WithFields("error", err).Tracef("format validation for %s failed", f.ID()) - } - continue - } - return f 
- } - return nil -} - -// ByName accepts a name@version string, such as: -// -// spdx-json@2.1 or cyclonedx@1.5 -func ByName(name string) sbom.Format { - parts := strings.SplitN(name, "@", 2) - version := sbom.AnyVersion - if len(parts) > 1 { - version = parts[1] - } - return ByNameAndVersion(parts[0], version) -} - -func ByNameAndVersion(name string, version string) sbom.Format { - name = cleanFormatName(name) - var mostRecentFormat sbom.Format - for _, f := range Formats() { - for _, n := range f.IDs() { - if cleanFormatName(string(n)) == name && versionMatches(f.Version(), version) { - // if the version is not specified and the format is cyclonedx, then we want to return the most recent version up to 1.4 - // If more aliases like cdx are added this will not catch those - we want to eventually provide a way for - // formats to inform this function what their default version is - // TODO: remove this check when 1.5 is stable or default formats are designed. PR below should be merged. - // https://github.com/CycloneDX/cyclonedx-go/pull/90 - if version == sbom.AnyVersion && strings.Contains(string(n), "cyclone") { - if f.Version() == "1.5" { - continue - } - } - if mostRecentFormat == nil || f.Version() > mostRecentFormat.Version() { - mostRecentFormat = f - } - } - } - } - return mostRecentFormat -} - -func versionMatches(version string, match string) bool { - if version == sbom.AnyVersion || match == sbom.AnyVersion { - return true - } - - match = strings.ReplaceAll(match, ".", "\\.") - match = strings.ReplaceAll(match, "*", ".*") - match = fmt.Sprintf("^%s(\\..*)*$", match) - matcher, err := regexp.Compile(match) - if err != nil { - return false - } - return matcher.MatchString(version) -} - -func cleanFormatName(name string) string { - r := strings.NewReplacer("-", "", "_", "") - return strings.ToLower(r.Replace(name)) -} - -// Encode takes all SBOM elements and a format option and encodes an SBOM document. -func Encode(s sbom.SBOM, f sbom.Format) ([]byte, error) { - buff := bytes.Buffer{} - - if err := f.Encode(&buff, s); err != nil { - return nil, fmt.Errorf("unable to encode sbom: %w", err) - } - - return buff.Bytes(), nil -} - -// Decode takes a reader for an SBOM and generates all internal SBOM elements. 
-func Decode(reader io.Reader) (*sbom.SBOM, sbom.Format, error) { - by, err := io.ReadAll(reader) - if err != nil { - return nil, nil, fmt.Errorf("unable to read sbom: %w", err) - } - - f := Identify(by) - if f == nil { - return nil, nil, fmt.Errorf("unable to identify format") - } - - s, err := f.Decode(bytes.NewReader(by)) - return s, f, err -} - -func AllIDs() (ids []sbom.FormatID) { - for _, f := range Formats() { - if slices.Contains(ids, f.ID()) { - continue - } - ids = append(ids, f.ID()) - } - return ids -} diff --git a/vendor/github.com/anchore/syft/syft/formats/github/encoder.go b/vendor/github.com/anchore/syft/syft/formats/github/encoder.go deleted file mode 100644 index 6f8ff571..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/github/encoder.go +++ /dev/null @@ -1,190 +0,0 @@ -package github - -import ( - "fmt" - "strings" - "time" - - "github.com/mholt/archiver/v3" - - "github.com/anchore/packageurl-go" - "github.com/anchore/syft/internal/log" - "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/sbom" - "github.com/anchore/syft/syft/source" -) - -// toGithubModel converts the provided SBOM to a GitHub dependency model -func toGithubModel(s *sbom.SBOM) DependencySnapshot { - scanTime := time.Now().Format(time.RFC3339) // TODO is there a record of this somewhere? - v := s.Descriptor.Version - if v == "[not provided]" || v == "" { - v = "0.0.0-dev" - } - return DependencySnapshot{ - Version: 0, - // TODO allow property input to specify the Job, Sha, and Ref - Detector: DetectorMetadata{ - Name: s.Descriptor.Name, - URL: "https://github.com/anchore/syft", - Version: v, - }, - Metadata: toSnapshotMetadata(s), - Manifests: toGithubManifests(s), - Scanned: scanTime, - } -} - -// toSnapshotMetadata captures the linux distribution information and other metadata -func toSnapshotMetadata(s *sbom.SBOM) Metadata { - out := Metadata{} - - if s.Artifacts.LinuxDistribution != nil { - d := s.Artifacts.LinuxDistribution - qualifiers := packageurl.Qualifiers{} - if len(d.IDLike) > 0 { - qualifiers = append(qualifiers, packageurl.Qualifier{ - Key: "like", - Value: strings.Join(d.IDLike, ","), - }) - } - purl := packageurl.NewPackageURL("generic", "", d.ID, d.VersionID, qualifiers, "") - out["syft:distro"] = purl.ToString() - } - - return out -} - -func filesystem(p pkg.Package) string { - locations := p.Locations.ToSlice() - if len(locations) > 0 { - return locations[0].FileSystemID - } - return "" -} - -// toGithubManifests manifests, each of which represents a specific location that has dependencies -func toGithubManifests(s *sbom.SBOM) Manifests { - manifests := map[string]*Manifest{} - - for _, p := range s.Artifacts.Packages.Sorted() { - path := toPath(s.Source, p) - manifest, ok := manifests[path] - if !ok { - manifest = &Manifest{ - Name: path, - File: FileInfo{ - SourceLocation: path, - }, - Resolved: DependencyGraph{}, - } - fs := filesystem(p) - if fs != "" { - manifest.Metadata = Metadata{ - "syft:filesystem": fs, - } - } - manifests[path] = manifest - } - - name := dependencyName(p) - manifest.Resolved[name] = DependencyNode{ - PackageURL: p.PURL, - Metadata: toDependencyMetadata(p), - Relationship: toDependencyRelationshipType(p), - Scope: toDependencyScope(p), - Dependencies: toDependencies(s, p), - } - } - - out := Manifests{} - for k, v := range manifests { - out[k] = *v - } - return out -} - -// toPath Generates a string representation of the package location, optionally including the layer hash -func toPath(s source.Description, p pkg.Package) string { 
- inputPath := trimRelative(s.Name) - locations := p.Locations.ToSlice() - if len(locations) > 0 { - location := locations[0] - packagePath := location.RealPath - if location.VirtualPath != "" { - packagePath = location.VirtualPath - } - packagePath = strings.TrimPrefix(packagePath, "/") - switch metadata := s.Metadata.(type) { - case source.StereoscopeImageSourceMetadata: - image := strings.ReplaceAll(metadata.UserInput, ":/", "//") - return fmt.Sprintf("%s:/%s", image, packagePath) - case source.FileSourceMetadata: - path := trimRelative(metadata.Path) - if isArchive(metadata.Path) { - return fmt.Sprintf("%s:/%s", path, packagePath) - } - return path - case source.DirectorySourceMetadata: - path := trimRelative(metadata.Path) - if path != "" { - return fmt.Sprintf("%s/%s", path, packagePath) - } - return packagePath - } - } - return inputPath -} - -func trimRelative(s string) string { - s = strings.TrimPrefix(s, "./") - if s == "." { - s = "" - } - return s -} - -// isArchive returns true if the path appears to be an archive -func isArchive(path string) bool { - _, err := archiver.ByExtension(path) - return err == nil -} - -func toDependencies(s *sbom.SBOM, p pkg.Package) (out []string) { - for _, r := range s.Relationships { - if r.From.ID() == p.ID() { - if p, ok := r.To.(pkg.Package); ok { - out = append(out, dependencyName(p)) - } - } - } - return -} - -// dependencyName to make things a little nicer to read; this might end up being lossy -func dependencyName(p pkg.Package) string { - purl, err := packageurl.FromString(p.PURL) - if err != nil { - log.Warnf("Invalid PURL for package: '%s' PURL: '%s' (%w)", p.Name, p.PURL, err) - return "" - } - // don't use qualifiers for this - purl.Qualifiers = nil - return purl.ToString() -} - -func toDependencyScope(_ pkg.Package) DependencyScope { - return DependencyScopeRuntime -} - -func toDependencyRelationshipType(_ pkg.Package) DependencyRelationship { - return DependencyRelationshipDirect -} - -func toDependencyMetadata(_ pkg.Package) Metadata { - // We have limited properties: up to 8 with reasonably small values - // For now, we are encoding the location as part of the key, we are encoding PURLs with most - // of the other information Grype might need; and the distro information at the top level - // so we don't need anything here yet - return Metadata{} -} diff --git a/vendor/github.com/anchore/syft/syft/formats/github/format.go b/vendor/github.com/anchore/syft/syft/formats/github/format.go deleted file mode 100644 index 4e394671..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/github/format.go +++ /dev/null @@ -1,28 +0,0 @@ -package github - -import ( - "encoding/json" - "io" - - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "github-json" - -func Format() sbom.Format { - return sbom.NewFormat( - sbom.AnyVersion, - func(writer io.Writer, sbom sbom.SBOM) error { - bom := toGithubModel(&sbom) - - encoder := json.NewEncoder(writer) - encoder.SetEscapeHTML(false) - encoder.SetIndent("", " ") - - return encoder.Encode(bom) - }, - nil, - nil, - ID, "github", - ) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/github/github_dependency_api.go b/vendor/github.com/anchore/syft/syft/formats/github/github_dependency_api.go deleted file mode 100644 index fe873d41..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/github/github_dependency_api.go +++ /dev/null @@ -1,78 +0,0 @@ -package github - -// Derived from: https://gist.github.com/reiddraper/fdab2883db0f372c146d1a750fc1c43f - -type 
DependencySnapshot struct { - Version int `json:"version"` - Job Job `json:"job,omitempty"` // !omitempty - Sha string `json:"sha,omitempty"` // !omitempty sha of the Git commit - Ref string `json:"ref,omitempty"` // !omitempty ref of the Git commit example "refs/heads/main" - Detector DetectorMetadata `json:"detector,omitempty"` - Metadata Metadata `json:"metadata,omitempty"` - Manifests Manifests `json:"manifests,omitempty"` - Scanned ISO8601Date `json:"scanned,omitempty"` -} - -type Job struct { - Correlator string `json:"correlator,omitempty"` // !omitempty - ID string `json:"id,omitempty"` // !omitempty - HTMLURL string `json:"html_url,omitempty"` -} - -type DetectorMetadata struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Version string `json:"version,omitempty"` -} - -type Manifests map[string]Manifest - -// Manifest A collection of related dependencies, either declared in a file, -// or representing a logical group of dependencies. -type Manifest struct { - Name string `json:"name"` - File FileInfo `json:"file"` - Metadata Metadata `json:"metadata,omitempty"` - Resolved DependencyGraph `json:"resolved,omitempty"` -} - -type FileInfo struct { - SourceLocation string `json:"source_location,omitempty"` -} - -// DependencyRelationship A notation of whether a dependency is requested directly -// by this manifest, or is a dependency of another dependency. -type DependencyRelationship string - -const ( - DependencyRelationshipDirect DependencyRelationship = "direct" - DependencyRelationshipIndirect DependencyRelationship = "indirect" -) - -// DependencyScope A notation of whether the dependency is required for the primary -// build artifact (runtime), or is only used for development. -// Future versions of this specification may allow for more granular -// scopes, like `runtimeserver`, `runtimeshipped`, -// `developmenttest`, `developmentbenchmark`. 
-type DependencyScope string - -const ( - DependencyScopeRuntime DependencyScope = "runtime" - DependencyScopeDevelopment DependencyScope = "development" -) - -type DependencyNode struct { - PackageURL string `json:"package_url,omitempty"` - Metadata Metadata `json:"metadata,omitempty"` - Relationship DependencyRelationship `json:"relationship,omitempty"` - Scope DependencyScope `json:"scope,omitempty"` - Dependencies []string `json:"dependencies,omitempty"` -} - -type DependencyGraph map[string]DependencyNode - -type ISO8601Date = string - -type Scalar interface{} // should be: null | boolean | string | number - -type Metadata map[string]Scalar diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxjson/decoder.go b/vendor/github.com/anchore/syft/syft/formats/spdxjson/decoder.go deleted file mode 100644 index 28a477a1..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxjson/decoder.go +++ /dev/null @@ -1,20 +0,0 @@ -package spdxjson - -import ( - "fmt" - "io" - - "github.com/spdx/tools-golang/json" - - "github.com/anchore/syft/syft/formats/common/spdxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func decoder(reader io.Reader) (s *sbom.SBOM, err error) { - doc, err := json.Read(reader) - if err != nil { - return nil, fmt.Errorf("unable to decode spdx-json: %w", err) - } - - return spdxhelpers.ToSyftModel(doc) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxjson/encoder.go b/vendor/github.com/anchore/syft/syft/formats/spdxjson/encoder.go deleted file mode 100644 index 8a2c89f7..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxjson/encoder.go +++ /dev/null @@ -1,38 +0,0 @@ -package spdxjson - -import ( - "encoding/json" - "io" - - "github.com/spdx/tools-golang/convert" - "github.com/spdx/tools-golang/spdx/v2/v2_2" - - "github.com/anchore/syft/syft/formats/common/spdxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func encoder2_3(output io.Writer, s sbom.SBOM) error { - doc := spdxhelpers.ToFormatModel(s) - return encodeJSON(output, doc) -} - -func encoder2_2(output io.Writer, s sbom.SBOM) error { - doc := spdxhelpers.ToFormatModel(s) - - var out v2_2.Document - err := convert.Document(doc, &out) - if err != nil { - return err - } - - return encodeJSON(output, out) -} - -func encodeJSON(output io.Writer, doc interface{}) error { - enc := json.NewEncoder(output) - // prevent > and < from being escaped in the payload - enc.SetEscapeHTML(false) - enc.SetIndent("", " ") - - return enc.Encode(doc) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxjson/format.go b/vendor/github.com/anchore/syft/syft/formats/spdxjson/format.go deleted file mode 100644 index 26d543e4..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxjson/format.go +++ /dev/null @@ -1,33 +0,0 @@ -package spdxjson - -import ( - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "spdx-json" - -var IDs = []sbom.FormatID{ID} - -// note: this format is LOSSY relative to the syftjson format - -func Format2_2() sbom.Format { - return sbom.NewFormat( - "2.2", - encoder2_2, - decoder, - validator, - IDs..., - ) -} - -func Format2_3() sbom.Format { - return sbom.NewFormat( - "2.3", - encoder2_3, - decoder, - validator, - IDs..., - ) -} - -var Format = Format2_3 diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxjson/validator.go b/vendor/github.com/anchore/syft/syft/formats/spdxjson/validator.go deleted file mode 100644 index 77430906..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxjson/validator.go +++ /dev/null @@ 
-1,10 +0,0 @@ -package spdxjson - -import ( - "io" -) - -func validator(reader io.Reader) error { - _, err := decoder(reader) - return err -} diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/decoder.go b/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/decoder.go deleted file mode 100644 index 44ad09a7..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/decoder.go +++ /dev/null @@ -1,20 +0,0 @@ -package spdxtagvalue - -import ( - "fmt" - "io" - - "github.com/spdx/tools-golang/tagvalue" - - "github.com/anchore/syft/syft/formats/common/spdxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func decoder(reader io.Reader) (*sbom.SBOM, error) { - doc, err := tagvalue.Read(reader) - if err != nil { - return nil, fmt.Errorf("unable to decode spdx-tag-value: %w", err) - } - - return spdxhelpers.ToSyftModel(doc) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/encoder.go b/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/encoder.go deleted file mode 100644 index 5949bfc8..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/encoder.go +++ /dev/null @@ -1,38 +0,0 @@ -package spdxtagvalue - -import ( - "io" - - "github.com/spdx/tools-golang/convert" - "github.com/spdx/tools-golang/spdx/v2/v2_1" - "github.com/spdx/tools-golang/spdx/v2/v2_2" - "github.com/spdx/tools-golang/tagvalue" - - "github.com/anchore/syft/syft/formats/common/spdxhelpers" - "github.com/anchore/syft/syft/sbom" -) - -func encoder2_3(output io.Writer, s sbom.SBOM) error { - model := spdxhelpers.ToFormatModel(s) - return tagvalue.Write(model, output) -} - -func encoder2_2(output io.Writer, s sbom.SBOM) error { - model := spdxhelpers.ToFormatModel(s) - var out v2_2.Document - err := convert.Document(model, &out) - if err != nil { - return err - } - return tagvalue.Write(out, output) -} - -func encoder2_1(output io.Writer, s sbom.SBOM) error { - model := spdxhelpers.ToFormatModel(s) - var out v2_1.Document - err := convert.Document(model, &out) - if err != nil { - return err - } - return tagvalue.Write(out, output) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/format.go b/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/format.go deleted file mode 100644 index 46db72d6..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/format.go +++ /dev/null @@ -1,42 +0,0 @@ -package spdxtagvalue - -import ( - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "spdx-tag-value" - -var IDs = []sbom.FormatID{ID, "spdx", "spdx-tv"} - -// note: this format is LOSSY relative to the syftjson format -func Format2_1() sbom.Format { - return sbom.NewFormat( - "2.1", - encoder2_1, - decoder, - validator, - IDs..., - ) -} - -func Format2_2() sbom.Format { - return sbom.NewFormat( - "2.2", - encoder2_2, - decoder, - validator, - IDs..., - ) -} - -func Format2_3() sbom.Format { - return sbom.NewFormat( - "2.3", - encoder2_3, - decoder, - validator, - IDs..., - ) -} - -var Format = Format2_3 diff --git a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/validator.go b/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/validator.go deleted file mode 100644 index e68875f2..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/spdxtagvalue/validator.go +++ /dev/null @@ -1,10 +0,0 @@ -package spdxtagvalue - -import ( - "io" -) - -func validator(reader io.Reader) error { - _, err := decoder(reader) - return err -} diff --git 
a/vendor/github.com/anchore/syft/syft/formats/syftjson/decoder.go b/vendor/github.com/anchore/syft/syft/formats/syftjson/decoder.go deleted file mode 100644 index 845c1fc0..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/decoder.go +++ /dev/null @@ -1,48 +0,0 @@ -package syftjson - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/Masterminds/semver" - - "github.com/anchore/syft/internal" - "github.com/anchore/syft/internal/log" - "github.com/anchore/syft/syft/formats/syftjson/model" - "github.com/anchore/syft/syft/sbom" -) - -func decoder(reader io.Reader) (*sbom.SBOM, error) { - dec := json.NewDecoder(reader) - - var doc model.Document - err := dec.Decode(&doc) - if err != nil { - return nil, fmt.Errorf("unable to decode syft-json: %w", err) - } - - if err := checkSupportedSchema(doc.Schema.Version, internal.JSONSchemaVersion); err != nil { - log.Warn(err) - } - - return toSyftModel(doc) -} - -func checkSupportedSchema(documentVerion string, parserVersion string) error { - documentV, err := semver.NewVersion(documentVerion) - if err != nil { - return fmt.Errorf("error comparing document schema version with parser schema version: %w", err) - } - - parserV, err := semver.NewVersion(parserVersion) - if err != nil { - return fmt.Errorf("error comparing document schema version with parser schema version: %w", err) - } - - if documentV.GreaterThan(parserV) { - return fmt.Errorf("document has schema version %s, but parser has older schema version (%s)", documentVerion, parserVersion) - } - - return nil -} diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/encoder.go b/vendor/github.com/anchore/syft/syft/formats/syftjson/encoder.go deleted file mode 100644 index ae52818f..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/encoder.go +++ /dev/null @@ -1,19 +0,0 @@ -package syftjson - -import ( - "encoding/json" - "io" - - "github.com/anchore/syft/syft/sbom" -) - -func encoder(output io.Writer, s sbom.SBOM) error { - doc := ToFormatModel(s) - - enc := json.NewEncoder(output) - // prevent > and < from being escaped in the payload - enc.SetEscapeHTML(false) - enc.SetIndent("", " ") - - return enc.Encode(&doc) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/format.go b/vendor/github.com/anchore/syft/syft/formats/syftjson/format.go deleted file mode 100644 index 5f336871..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/format.go +++ /dev/null @@ -1,18 +0,0 @@ -package syftjson - -import ( - "github.com/anchore/syft/internal" - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "syft-json" - -func Format() sbom.Format { - return sbom.NewFormat( - internal.JSONSchemaVersion, - encoder, - decoder, - validator, - ID, "json", "syft", - ) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/syftjson/validator.go b/vendor/github.com/anchore/syft/syft/formats/syftjson/validator.go deleted file mode 100644 index daf541b8..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/syftjson/validator.go +++ /dev/null @@ -1,31 +0,0 @@ -package syftjson - -import ( - "encoding/json" - "fmt" - "io" - "strings" - - "github.com/anchore/syft/syft/formats/syftjson/model" -) - -func validator(reader io.Reader) error { - type Document struct { - Schema model.Schema `json:"schema"` - } - - dec := json.NewDecoder(reader) - - var doc Document - err := dec.Decode(&doc) - if err != nil { - return fmt.Errorf("unable to decode: %w", err) - } - - // note: we accept all schema versions - // TODO: add 
per-schema version parsing - if strings.Contains(doc.Schema.URL, "anchore/syft") { - return nil - } - return fmt.Errorf("could not extract syft schema") -} diff --git a/vendor/github.com/anchore/syft/syft/formats/table/encoder.go b/vendor/github.com/anchore/syft/syft/formats/table/encoder.go deleted file mode 100644 index 7b6c817b..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/table/encoder.go +++ /dev/null @@ -1,79 +0,0 @@ -package table - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/olekukonko/tablewriter" - - "github.com/anchore/syft/syft/sbom" -) - -func encoder(output io.Writer, s sbom.SBOM) error { - var rows [][]string - - columns := []string{"Name", "Version", "Type"} - for _, p := range s.Artifacts.Packages.Sorted() { - row := []string{ - p.Name, - p.Version, - string(p.Type), - } - rows = append(rows, row) - } - - if len(rows) == 0 { - _, err := fmt.Fprintln(output, "No packages discovered") - return err - } - - // sort by name, version, then type - sort.SliceStable(rows, func(i, j int) bool { - for col := 0; col < len(columns); col++ { - if rows[i][col] != rows[j][col] { - return rows[i][col] < rows[j][col] - } - } - return false - }) - rows = removeDuplicateRows(rows) - - table := tablewriter.NewWriter(output) - - table.SetHeader(columns) - table.SetHeaderLine(false) - table.SetBorder(false) - table.SetAutoWrapText(false) - table.SetAutoFormatHeaders(true) - table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) - table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetCenterSeparator("") - table.SetColumnSeparator("") - table.SetRowSeparator("") - table.SetTablePadding(" ") - table.SetNoWhiteSpace(true) - - table.AppendBulk(rows) - table.Render() - - return nil -} - -func removeDuplicateRows(items [][]string) [][]string { - seen := map[string][]string{} - var result [][]string - - for _, v := range items { - key := strings.Join(v, "|") - if seen[key] != nil { - // dup! 
- continue - } - - seen[key] = v - result = append(result, v) - } - return result -} diff --git a/vendor/github.com/anchore/syft/syft/formats/table/format.go b/vendor/github.com/anchore/syft/syft/formats/table/format.go deleted file mode 100644 index 7d962372..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/table/format.go +++ /dev/null @@ -1,17 +0,0 @@ -package table - -import ( - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "syft-table" - -func Format() sbom.Format { - return sbom.NewFormat( - sbom.AnyVersion, - encoder, - nil, - nil, - ID, "table", - ) -} diff --git a/vendor/github.com/anchore/syft/syft/formats/template/encoder.go b/vendor/github.com/anchore/syft/syft/formats/template/encoder.go deleted file mode 100644 index 6cdce172..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/template/encoder.go +++ /dev/null @@ -1,55 +0,0 @@ -package template - -import ( - "errors" - "fmt" - "os" - "reflect" - "text/template" - - "github.com/Masterminds/sprig/v3" - "github.com/mitchellh/go-homedir" -) - -func makeTemplateExecutor(templateFilePath string) (*template.Template, error) { - if templateFilePath == "" { - return nil, errors.New("no template file: please provide a template path") - } - - expandedPathToTemplateFile, err := homedir.Expand(templateFilePath) - if err != nil { - return nil, fmt.Errorf("unable to expand path %s", templateFilePath) - } - - templateContents, err := os.ReadFile(expandedPathToTemplateFile) - if err != nil { - return nil, fmt.Errorf("unable to get template content: %w", err) - } - - templateName := expandedPathToTemplateFile - tmpl, err := template.New(templateName).Funcs(funcMap).Parse(string(templateContents)) - if err != nil { - return nil, fmt.Errorf("unable to parse template: %w", err) - } - - return tmpl, nil -} - -// These are custom functions available to template authors. 
-var funcMap = func() template.FuncMap { - f := sprig.HermeticTxtFuncMap() - f["getLastIndex"] = func(collection interface{}) int { - if v := reflect.ValueOf(collection); v.Kind() == reflect.Slice { - return v.Len() - 1 - } - - return 0 - } - // Checks if a field is defined - f["hasField"] = func(obj interface{}, field string) bool { - t := reflect.TypeOf(obj) - _, ok := t.FieldByName(field) - return ok - } - return f -}() diff --git a/vendor/github.com/anchore/syft/syft/formats/template/format.go b/vendor/github.com/anchore/syft/syft/formats/template/format.go deleted file mode 100644 index 4d6fb0ab..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/template/format.go +++ /dev/null @@ -1,62 +0,0 @@ -package template - -import ( - "fmt" - "io" - - "github.com/anchore/syft/syft/formats/syftjson" - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "template" - -func Format() sbom.Format { - return OutputFormat{} -} - -// implementation of sbom.Format interface -// to make use of format options -type OutputFormat struct { - templateFilePath string -} - -func (f OutputFormat) ID() sbom.FormatID { - return ID -} - -func (f OutputFormat) IDs() []sbom.FormatID { - return []sbom.FormatID{ID} -} - -func (f OutputFormat) Version() string { - return sbom.AnyVersion -} - -func (f OutputFormat) String() string { - return fmt.Sprintf("template: " + f.templateFilePath) -} - -func (f OutputFormat) Decode(_ io.Reader) (*sbom.SBOM, error) { - return nil, sbom.ErrDecodingNotSupported -} - -func (f OutputFormat) Encode(output io.Writer, s sbom.SBOM) error { - tmpl, err := makeTemplateExecutor(f.templateFilePath) - if err != nil { - return err - } - - doc := syftjson.ToFormatModel(s) - return tmpl.Execute(output, doc) -} - -func (f OutputFormat) Validate(_ io.Reader) error { - return sbom.ErrValidationNotSupported -} - -// SetTemplatePath sets path for template file -func (f *OutputFormat) SetTemplatePath(filePath string) { - f.templateFilePath = filePath -} - -var _ sbom.Format = (*OutputFormat)(nil) diff --git a/vendor/github.com/anchore/syft/syft/formats/text/encoder.go b/vendor/github.com/anchore/syft/syft/formats/text/encoder.go deleted file mode 100644 index 1c19084d..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/text/encoder.go +++ /dev/null @@ -1,55 +0,0 @@ -package text - -import ( - "fmt" - "io" - "text/tabwriter" - - "github.com/anchore/syft/syft/sbom" - "github.com/anchore/syft/syft/source" -) - -func encoder(output io.Writer, s sbom.SBOM) error { - // init the tabular writer - w := new(tabwriter.Writer) - w.Init(output, 0, 8, 0, '\t', tabwriter.AlignRight) - - switch metadata := s.Source.Metadata.(type) { - case source.DirectorySourceMetadata: - fmt.Fprintf(w, "[Path: %s]\n", metadata.Path) - case source.FileSourceMetadata: - fmt.Fprintf(w, "[Path: %s]\n", metadata.Path) - case source.StereoscopeImageSourceMetadata: - fmt.Fprintln(w, "[Image]") - - for idx, l := range metadata.Layers { - fmt.Fprintln(w, " Layer:\t", idx) - fmt.Fprintln(w, " Digest:\t", l.Digest) - fmt.Fprintln(w, " Size:\t", l.Size) - fmt.Fprintln(w, " MediaType:\t", l.MediaType) - fmt.Fprintln(w) - w.Flush() - } - default: - return fmt.Errorf("unsupported source: %T", s.Source.Metadata) - } - - // populate artifacts... 
- rows := 0 - for _, p := range s.Artifacts.Packages.Sorted() { - fmt.Fprintf(w, "[%s]\n", p.Name) - fmt.Fprintln(w, " Version:\t", p.Version) - fmt.Fprintln(w, " Type:\t", string(p.Type)) - fmt.Fprintln(w, " Found by:\t", p.FoundBy) - fmt.Fprintln(w) - w.Flush() - rows++ - } - - if rows == 0 { - fmt.Fprintln(output, "No packages discovered") - return nil - } - - return nil -} diff --git a/vendor/github.com/anchore/syft/syft/formats/text/format.go b/vendor/github.com/anchore/syft/syft/formats/text/format.go deleted file mode 100644 index 387bbd52..00000000 --- a/vendor/github.com/anchore/syft/syft/formats/text/format.go +++ /dev/null @@ -1,17 +0,0 @@ -package text - -import ( - "github.com/anchore/syft/syft/sbom" -) - -const ID sbom.FormatID = "syft-text" - -func Format() sbom.Format { - return sbom.NewFormat( - sbom.AnyVersion, - encoder, - nil, - nil, - ID, "text", - ) -} diff --git a/vendor/github.com/anchore/syft/syft/internal/fileresolver/container_image_all_layers.go b/vendor/github.com/anchore/syft/syft/internal/fileresolver/container_image_all_layers.go index e66c92aa..4bf7a1d2 100644 --- a/vendor/github.com/anchore/syft/syft/internal/fileresolver/container_image_all_layers.go +++ b/vendor/github.com/anchore/syft/syft/internal/fileresolver/container_image_all_layers.go @@ -193,10 +193,10 @@ func (r *ContainerImageAllLayers) FileContentsByLocation(location file.Location) switch entry.Metadata.Type { case stereoscopeFile.TypeSymLink, stereoscopeFile.TypeHardLink: // the location we are searching may be a symlink, we should always work with the resolved file - newLocation := r.RelativeFileByPath(location, location.VirtualPath) + newLocation := r.RelativeFileByPath(location, location.AccessPath) if newLocation == nil { // this is a dead link - return nil, fmt.Errorf("no contents for location=%q", location.VirtualPath) + return nil, fmt.Errorf("no contents for location=%q", location.AccessPath) } location = *newLocation case stereoscopeFile.TypeDirectory: diff --git a/vendor/github.com/anchore/syft/syft/internal/fileresolver/directory.go b/vendor/github.com/anchore/syft/syft/internal/fileresolver/directory.go index 766d53c8..2d74d826 100644 --- a/vendor/github.com/anchore/syft/syft/internal/fileresolver/directory.go +++ b/vendor/github.com/anchore/syft/syft/internal/fileresolver/directory.go @@ -110,7 +110,7 @@ func (r Directory) FilesByPath(userPaths ...string) ([]file.Location, error) { continue } - // we should be resolving symlinks and preserving this information as a VirtualPath to the real file + // we should be resolving symlinks and preserving this information as a AccessPath to the real file ref, err := r.searchContext.SearchByPath(userStrPath, filetree.FollowBasenameLinks) if err != nil { log.Tracef("unable to evaluate symlink for path=%q : %+v", userPath, err) @@ -264,8 +264,9 @@ func (r *Directory) FilesByMIMEType(types ...string) ([]file.Location, error) { if uniqueFileIDs.Contains(*refVia.Reference) { continue } - location := file.NewLocationFromDirectory( + location := file.NewVirtualLocationFromDirectory( r.responsePath(string(refVia.Reference.RealPath)), + r.responsePath(string(refVia.RequestPath)), *refVia.Reference, ) uniqueFileIDs.Add(*refVia.Reference) diff --git a/vendor/github.com/anchore/syft/syft/internal/fileresolver/excluding_file.go b/vendor/github.com/anchore/syft/syft/internal/fileresolver/excluding_file.go index 34c4948a..cbccd364 100644 --- a/vendor/github.com/anchore/syft/syft/internal/fileresolver/excluding_file.go +++ 
b/vendor/github.com/anchore/syft/syft/internal/fileresolver/excluding_file.go @@ -83,7 +83,7 @@ func (r *excluding) AllLocations() <-chan file.Location { } func locationMatches(location *file.Location, exclusionFn excludeFn) bool { - return exclusionFn(location.RealPath) || exclusionFn(location.VirtualPath) + return exclusionFn(location.RealPath) || exclusionFn(location.AccessPath) } func filterLocations(locations []file.Location, err error, exclusionFn excludeFn) ([]file.Location, error) { diff --git a/vendor/github.com/anchore/syft/syft/internal/fileresolver/unindexed_directory.go b/vendor/github.com/anchore/syft/syft/internal/fileresolver/unindexed_directory.go index 68fdd796..5ef892cf 100644 --- a/vendor/github.com/anchore/syft/syft/internal/fileresolver/unindexed_directory.go +++ b/vendor/github.com/anchore/syft/syft/internal/fileresolver/unindexed_directory.go @@ -165,8 +165,8 @@ nextPath: for i := range out { existing := &out[i] if existing.RealPath == l.RealPath { - if l.VirtualPath == "" { - existing.VirtualPath = "" + if l.AccessPath == "" { + existing.AccessPath = "" } continue nextPath } @@ -261,13 +261,14 @@ func (u UnindexedDirectory) Write(location file.Location, reader io.Reader) erro func (u UnindexedDirectory) newLocation(filePath string, resolveLinks bool) *file.Location { filePath = path.Clean(filePath) - virtualPath := "" + virtualPath := filePath realPath := filePath if resolveLinks { paths := u.resolveLinks(filePath) if len(paths) > 1 { realPath = paths[len(paths)-1] + // TODO: this is not quite correct, as the equivalent of os.EvalSymlinks needs to be done (in the context of afero) if realPath != path.Clean(filePath) { virtualPath = paths[0] } diff --git a/vendor/github.com/anchore/syft/syft/internal/packagemetadata/completion_tester.go b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/completion_tester.go new file mode 100644 index 00000000..08a285bc --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/completion_tester.go @@ -0,0 +1,69 @@ +package packagemetadata + +import ( + "reflect" + "testing" +) + +type CompletionTester struct { + saw []any + valid []any + ignore []any +} + +func NewCompletionTester(t testing.TB, ignore ...any) *CompletionTester { + tester := &CompletionTester{ + valid: AllTypes(), + ignore: ignore, + } + t.Cleanup(func() { + t.Helper() + tester.validate(t) + }) + return tester +} + +func (tr *CompletionTester) Tested(t testing.TB, m any) { + t.Helper() + + if m == nil { + return + } + if len(tr.valid) == 0 { + t.Fatal("no valid metadata types to test against") + } + ty := reflect.TypeOf(m) + for _, v := range tr.valid { + if reflect.TypeOf(v) == ty { + tr.saw = append(tr.saw, m) + return + } + } + + t.Fatalf("tested metadata type is not valid: %s", ty.Name()) +} + +func (tr *CompletionTester) validate(t testing.TB) { + t.Helper() + + count := make(map[reflect.Type]int) + for _, m := range tr.saw { + count[reflect.TypeOf(m)]++ + } + +validations: + for _, v := range tr.valid { + ty := reflect.TypeOf(v) + + for _, ignore := range tr.ignore { + if ty == reflect.TypeOf(ignore) { + // skip ignored types + continue validations + } + } + + if c, exists := count[ty]; c == 0 || !exists { + t.Errorf("metadata type %s is not covered by a test", ty.Name()) + } + } +} diff --git a/vendor/github.com/anchore/syft/syft/internal/packagemetadata/discover_type_names.go b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/discover_type_names.go new file mode 100644 index 00000000..00d3c626 --- /dev/null +++ 
b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/discover_type_names.go @@ -0,0 +1,181 @@ +package packagemetadata + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os/exec" + "path/filepath" + "sort" + "strings" + "unicode" + + "github.com/scylladb/go-set/strset" +) + +// these are names of struct types in the pkg package that are not metadata types (thus should not be in the JSON schema) +var knownNonMetadataTypeNames = strset.New( + "Package", + "Collection", + "License", + "LicenseSet", +) + +func DiscoverTypeNames() ([]string, error) { + root, err := RepoRoot() + if err != nil { + return nil, err + } + files, err := filepath.Glob(filepath.Join(root, "syft/pkg/*.go")) + if err != nil { + return nil, err + } + return findMetadataDefinitionNames(files...) +} + +func RepoRoot() (string, error) { + root, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() + if err != nil { + return "", fmt.Errorf("unable to find repo root dir: %+v", err) + } + absRepoRoot, err := filepath.Abs(strings.TrimSpace(string(root))) + if err != nil { + return "", fmt.Errorf("unable to get abs path to repo root: %w", err) + } + return absRepoRoot, nil +} + +func findMetadataDefinitionNames(paths ...string) ([]string, error) { + names := strset.New() + usedNames := strset.New() + for _, path := range paths { + metadataDefinitions, usedTypeNames, err := findMetadataDefinitionNamesInFile(path) + if err != nil { + return nil, err + } + + // useful for debugging... + // fmt.Println(path) + // fmt.Println("Defs:", metadataDefinitions) + // fmt.Println("Used Types:", usedTypeNames) + // fmt.Println() + + names.Add(metadataDefinitions...) + usedNames.Add(usedTypeNames...) + } + + // any definition that is used within another struct should not be considered a top-level metadata definition + names.Remove(usedNames.List()...) + + strNames := names.List() + sort.Strings(strNames) + + // note: 35 is a point-in-time gut check. This number could be updated if new metadata definitions are added, but is not required. + // it is really intended to catch any major issues with the generation process that would generate, say, 0 definitions. + if len(strNames) < 35 { + return nil, fmt.Errorf("not enough metadata definitions found (discovered: " + fmt.Sprintf("%d", len(strNames)) + ")") + } + + return strNames, nil +} + +func findMetadataDefinitionNamesInFile(path string) ([]string, []string, error) { + // set up the parser + fs := token.NewFileSet() + f, err := parser.ParseFile(fs, path, nil, parser.ParseComments) + if err != nil { + return nil, nil, err + } + + var metadataDefinitions []string + var usedTypeNames []string + for _, decl := range f.Decls { + // check if the declaration is a type declaration + spec, ok := decl.(*ast.GenDecl) + if !ok || spec.Tok != token.TYPE { + continue + } + + // loop over all types declared in the type declaration + for _, typ := range spec.Specs { + // check if the type is a struct type + spec, ok := typ.(*ast.TypeSpec) + if !ok || spec.Type == nil { + continue + } + + name := spec.Name.String() + + // only look for exported types + if !isMetadataTypeCandidate(name) { + continue + } + + structType := extractStructType(spec.Type) + if structType == nil { + continue + } + + metadataDefinitions = append(metadataDefinitions, name) + usedTypeNames = append(usedTypeNames, typeNamesUsedInStruct(structType)...) 
+ } + } + return metadataDefinitions, usedTypeNames, nil +} + +func extractStructType(exp ast.Expr) *ast.StructType { + var structType *ast.StructType + switch ty := exp.(type) { + case *ast.StructType: + // this is a standard definition: + // type FooMetadata struct { ... } + structType = ty + case *ast.Ident: + if ty.Obj == nil { + return nil + } + + // this might be a type created from another type: + // type FooMetadata BarMetadata + // ... but we need to check that the other type definition is a struct type + typeSpec, ok := ty.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil + } + nestedStructType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + return nil + } + structType = nestedStructType + } + return structType +} + +func typeNamesUsedInStruct(structType *ast.StructType) []string { + // recursively find all type names used in the struct type + var names []string + for i := range structType.Fields.List { + // capture names of all of the types (not field names) + ast.Inspect(structType.Fields.List[i].Type, func(n ast.Node) bool { + ident, ok := n.(*ast.Ident) + if !ok { + return true + } + + // add the type name to the list + names = append(names, ident.Name) + + // continue inspecting + return true + }) + } + + return names +} + +func isMetadataTypeCandidate(name string) bool { + return len(name) > 0 && + unicode.IsUpper(rune(name[0])) && // must be exported + !knownNonMetadataTypeNames.Has(name) +} diff --git a/vendor/github.com/anchore/syft/syft/internal/packagemetadata/generated.go b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/generated.go new file mode 100644 index 00000000..d2e119c2 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/generated.go @@ -0,0 +1,48 @@ +// DO NOT EDIT: generated by syft/internal/packagemetadata/generate/main.go + +package packagemetadata + +import "github.com/anchore/syft/syft/pkg" + +// AllTypes returns a list of all pkg metadata types that syft supports (that are represented in the pkg.Package.Metadata field). 
+func AllTypes() []any { + return []any{ + pkg.AlpmDBEntry{}, + pkg.ApkDBEntry{}, + pkg.BinarySignature{}, + pkg.CocoaPodfileLockEntry{}, + pkg.ConanLockEntry{}, + pkg.ConanfileEntry{}, + pkg.ConaninfoEntry{}, + pkg.DartPubspecLockEntry{}, + pkg.DotnetDepsEntry{}, + pkg.DotnetPortableExecutableEntry{}, + pkg.DpkgDBEntry{}, + pkg.ElixirMixLockEntry{}, + pkg.ErlangRebarLockEntry{}, + pkg.GolangBinaryBuildinfoEntry{}, + pkg.GolangModuleEntry{}, + pkg.HackageStackYamlEntry{}, + pkg.HackageStackYamlLockEntry{}, + pkg.JavaArchive{}, + pkg.LinuxKernel{}, + pkg.LinuxKernelModule{}, + pkg.MicrosoftKbPatch{}, + pkg.NixStoreEntry{}, + pkg.NpmPackage{}, + pkg.NpmPackageLockEntry{}, + pkg.PhpComposerInstalledEntry{}, + pkg.PhpComposerLockEntry{}, + pkg.PortageEntry{}, + pkg.PythonPackage{}, + pkg.PythonPipfileLockEntry{}, + pkg.PythonRequirementsEntry{}, + pkg.RDescription{}, + pkg.RpmArchive{}, + pkg.RpmDBEntry{}, + pkg.RubyGemspec{}, + pkg.RustBinaryAuditEntry{}, + pkg.RustCargoLockEntry{}, + pkg.SwiftPackageManagerResolvedEntry{}, + } +} diff --git a/vendor/github.com/anchore/syft/syft/internal/packagemetadata/names.go b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/names.go new file mode 100644 index 00000000..2bec9866 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/internal/packagemetadata/names.go @@ -0,0 +1,142 @@ +package packagemetadata + +import ( + "reflect" + "strings" + + "github.com/anchore/syft/syft/pkg" +) + +type jsonType struct { + ty any + name string + legacyNames []string + noLookupLegacyName string // legacy name that conflict with other types, thus should not affect the lookup +} + +func jsonNames(ty any, name string, legacyNames ...string) jsonType { + return jsonType{ + ty: ty, + name: name, + legacyNames: expandLegacyNameVariants(legacyNames...), + } +} + +func jsonNamesWithoutLookup(ty any, name string, noLookupLegacyName string) jsonType { + return jsonType{ + ty: ty, + name: name, + noLookupLegacyName: noLookupLegacyName, + } +} + +type jsonTypeMapping struct { + typeToName map[reflect.Type]string + typeToLegacyName map[reflect.Type]string + nameToType map[string]reflect.Type +} + +func makeJSONTypes(types ...jsonType) jsonTypeMapping { + out := jsonTypeMapping{ + typeToName: make(map[reflect.Type]string), + typeToLegacyName: make(map[reflect.Type]string), + nameToType: make(map[string]reflect.Type), + } + for _, t := range types { + typ := reflect.TypeOf(t.ty) + out.typeToName[typ] = t.name + if len(t.noLookupLegacyName) > 0 { + out.typeToLegacyName[typ] = t.noLookupLegacyName + } else if len(t.legacyNames) > 0 { + out.typeToLegacyName[typ] = t.legacyNames[0] + } + out.nameToType[strings.ToLower(t.name)] = typ + for _, name := range t.legacyNames { + out.nameToType[strings.ToLower(name)] = typ + } + } + return out +} + +// jsonNameFromType is lookup of all known package metadata types to their current JSON name and all previously known aliases. +// It is important that if a name needs to change that the old name is kept in this map (as an alias) for backwards +// compatibility to support decoding older JSON documents. 
+var jsonTypes = makeJSONTypes( + jsonNames(pkg.AlpmDBEntry{}, "alpm-db-entry", "AlpmMetadata"), + jsonNames(pkg.ApkDBEntry{}, "apk-db-entry", "ApkMetadata"), + jsonNames(pkg.BinarySignature{}, "binary-signature", "BinaryMetadata"), + jsonNames(pkg.CocoaPodfileLockEntry{}, "cocoa-podfile-lock-entry", "CocoapodsMetadataType"), + jsonNames(pkg.ConanLockEntry{}, "c-conan-lock-entry", "ConanLockMetadataType"), + jsonNames(pkg.ConanfileEntry{}, "c-conan-file-entry", "ConanMetadataType"), + jsonNames(pkg.ConaninfoEntry{}, "c-conan-info-entry"), + jsonNames(pkg.DartPubspecLockEntry{}, "dart-pubspec-lock-entry", "DartPubMetadata"), + jsonNames(pkg.DotnetDepsEntry{}, "dotnet-deps-entry", "DotnetDepsMetadata"), + jsonNames(pkg.DotnetPortableExecutableEntry{}, "dotnet-portable-executable-entry"), + jsonNames(pkg.DpkgDBEntry{}, "dpkg-db-entry", "DpkgMetadata"), + jsonNames(pkg.RubyGemspec{}, "ruby-gemspec", "GemMetadata"), + jsonNames(pkg.GolangBinaryBuildinfoEntry{}, "go-module-buildinfo-entry", "GolangBinMetadata", "GolangMetadata"), + jsonNames(pkg.GolangModuleEntry{}, "go-module-entry", "GolangModMetadata"), + jsonNames(pkg.HackageStackYamlLockEntry{}, "haskell-hackage-stack-lock-entry", "HackageMetadataType"), + jsonNamesWithoutLookup(pkg.HackageStackYamlEntry{}, "haskell-hackage-stack-entry", "HackageMetadataType"), // the legacy value is split into two types, where the other is preferred + jsonNames(pkg.JavaArchive{}, "java-archive", "JavaMetadata"), + jsonNames(pkg.MicrosoftKbPatch{}, "microsoft-kb-patch", "KbPatchMetadata"), + jsonNames(pkg.LinuxKernel{}, "linux-kernel-archive", "LinuxKernel"), + jsonNames(pkg.LinuxKernelModule{}, "linux-kernel-module", "LinuxKernelModule"), + jsonNames(pkg.ElixirMixLockEntry{}, "elixir-mix-lock-entry", "MixLockMetadataType"), + jsonNames(pkg.NixStoreEntry{}, "nix-store-entry", "NixStoreMetadata"), + jsonNames(pkg.NpmPackage{}, "javascript-npm-package", "NpmPackageJsonMetadata"), + jsonNames(pkg.NpmPackageLockEntry{}, "javascript-npm-package-lock-entry", "NpmPackageLockJsonMetadata"), + jsonNames(pkg.PhpComposerLockEntry{}, "php-composer-lock-entry", "PhpComposerJsonMetadata"), + jsonNamesWithoutLookup(pkg.PhpComposerInstalledEntry{}, "php-composer-installed-entry", "PhpComposerJsonMetadata"), // the legacy value is split into two types, where the other is preferred + jsonNames(pkg.PortageEntry{}, "portage-db-entry", "PortageMetadata"), + jsonNames(pkg.PythonPackage{}, "python-package", "PythonPackageMetadata"), + jsonNames(pkg.PythonPipfileLockEntry{}, "python-pipfile-lock-entry", "PythonPipfileLockMetadata"), + jsonNames(pkg.PythonRequirementsEntry{}, "python-pip-requirements-entry", "PythonRequirementsMetadata"), + jsonNames(pkg.ErlangRebarLockEntry{}, "erlang-rebar-lock-entry", "RebarLockMetadataType"), + jsonNames(pkg.RDescription{}, "r-description", "RDescriptionFileMetadataType"), + jsonNames(pkg.RpmDBEntry{}, "rpm-db-entry", "RpmMetadata", "RpmdbMetadata"), + jsonNamesWithoutLookup(pkg.RpmArchive{}, "rpm-archive", "RpmMetadata"), // the legacy value is split into two types, where the other is preferred + jsonNames(pkg.SwiftPackageManagerResolvedEntry{}, "swift-package-manager-lock-entry", "SwiftPackageManagerMetadata"), + jsonNames(pkg.RustCargoLockEntry{}, "rust-cargo-lock-entry", "RustCargoPackageMetadata"), + jsonNamesWithoutLookup(pkg.RustBinaryAuditEntry{}, "rust-cargo-audit-entry", "RustCargoPackageMetadata"), // the legacy value is split into two types, where the other is preferred +) + +func expandLegacyNameVariants(names ...string) []string { 
+ var candidates []string + for _, name := range names { + candidates = append(candidates, name) + if strings.HasSuffix(name, "MetadataType") { + candidates = append(candidates, strings.TrimSuffix(name, "Type")) + } else if strings.HasSuffix(name, "Metadata") { + candidates = append(candidates, name+"Type") + } + } + return candidates +} + +func AllTypeNames() []string { + names := make([]string, 0) + for _, t := range AllTypes() { + names = append(names, reflect.TypeOf(t).Name()) + } + return names +} + +func JSONName(metadata any) string { + if name, exists := jsonTypes.typeToName[reflect.TypeOf(metadata)]; exists { + return name + } + return "" +} + +func JSONLegacyName(metadata any) string { + if name, exists := jsonTypes.typeToLegacyName[reflect.TypeOf(metadata)]; exists { + return name + } + return JSONName(metadata) +} + +func ReflectTypeFromJSONName(name string) reflect.Type { + name = strings.ToLower(name) + return jsonTypes.nameToType[name] +} diff --git a/vendor/github.com/anchore/syft/syft/internal/sourcemetadata/names.go b/vendor/github.com/anchore/syft/syft/internal/sourcemetadata/names.go index b33e7f94..391a9c2d 100644 --- a/vendor/github.com/anchore/syft/syft/internal/sourcemetadata/names.go +++ b/vendor/github.com/anchore/syft/syft/internal/sourcemetadata/names.go @@ -13,7 +13,7 @@ var jsonNameFromType = map[reflect.Type][]string{ reflect.TypeOf(source.StereoscopeImageSourceMetadata{}): {"image"}, } -func AllNames() []string { +func AllTypeNames() []string { names := make([]string, 0) for _, t := range AllTypes() { names = append(names, reflect.TypeOf(t).Name()) @@ -32,7 +32,7 @@ func ReflectTypeFromJSONName(name string) reflect.Type { name = strings.ToLower(name) for t, vs := range jsonNameFromType { for _, v := range vs { - if v == name { + if strings.ToLower(v) == name { return t } } diff --git a/vendor/github.com/anchore/syft/syft/linux/identify_release.go b/vendor/github.com/anchore/syft/syft/linux/identify_release.go index cf2da477..3ab8db90 100644 --- a/vendor/github.com/anchore/syft/syft/linux/identify_release.go +++ b/vendor/github.com/anchore/syft/syft/linux/identify_release.go @@ -71,7 +71,7 @@ func IdentifyRelease(resolver file.Resolver) *Release { } content, err := io.ReadAll(contentReader) - internal.CloseAndLogError(contentReader, location.VirtualPath) + internal.CloseAndLogError(contentReader, location.AccessPath) if err != nil { logger.WithFields("error", err, "path", location.RealPath).Trace("unable to read contents") continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/alpm_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/alpm.go similarity index 88% rename from vendor/github.com/anchore/syft/syft/pkg/alpm_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/alpm.go index ff65078f..1f9873f3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/alpm_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/alpm.go @@ -9,11 +9,12 @@ import ( "github.com/anchore/syft/syft/file" ) -var _ FileOwner = (*AlpmMetadata)(nil) +var _ FileOwner = (*AlpmDBEntry)(nil) const AlpmDBGlob = "**/var/lib/pacman/local/**/desc" -type AlpmMetadata struct { +// AlpmDBEntry is a struct that represents the package data stored in the pacman fla-filet stores for arch linux. 
+type AlpmDBEntry struct { BasePackage string `mapstructure:"base" json:"basepackage" cyclonedx:"basepackage"` Package string `mapstructure:"name" json:"package" cyclonedx:"package"` Version string `mapstructure:"version" json:"version" cyclonedx:"version"` @@ -39,7 +40,7 @@ type AlpmFileRecord struct { Digests []file.Digest `mapstructure:"digests" json:"digest,omitempty"` } -func (m AlpmMetadata) OwnedFiles() (result []string) { +func (m AlpmDBEntry) OwnedFiles() (result []string) { s := strset.New() for _, f := range m.Files { if f.Path != "" { diff --git a/vendor/github.com/anchore/syft/syft/pkg/apk_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/apk.go similarity index 91% rename from vendor/github.com/anchore/syft/syft/pkg/apk_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/apk.go index 4a3b55ab..2708e0c3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/apk_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/apk.go @@ -15,14 +15,14 @@ import ( const ApkDBGlob = "**/lib/apk/db/installed" -var _ FileOwner = (*ApkMetadata)(nil) +var _ FileOwner = (*ApkDBEntry)(nil) -// ApkMetadata represents all captured data for a Alpine DB package entry. +// ApkDBEntry represents all captured data for the alpine linux package manager flat-file store. // See the following sources for more information: // - https://wiki.alpinelinux.org/wiki/Apk_spec // - https://git.alpinelinux.org/apk-tools/tree/src/package.c // - https://git.alpinelinux.org/apk-tools/tree/src/database.c -type ApkMetadata struct { +type ApkDBEntry struct { Package string `mapstructure:"P" json:"package"` OriginPackage string `mapstructure:"o" json:"originPackage" cyclonedx:"originPackage"` Maintainer string `mapstructure:"m" json:"maintainer"` @@ -41,9 +41,9 @@ type ApkMetadata struct { type spaceDelimitedStringSlice []string -func (m *ApkMetadata) UnmarshalJSON(data []byte) error { +func (m *ApkDBEntry) UnmarshalJSON(data []byte) error { var fields []reflect.StructField - t := reflect.TypeOf(ApkMetadata{}) + t := reflect.TypeOf(ApkDBEntry{}) for i := 0; i < t.NumField(); i++ { f := t.Field(i) if f.Name == "Dependencies" { @@ -102,7 +102,7 @@ type ApkFileRecord struct { Digest *file.Digest `json:"digest,omitempty"` } -func (m ApkMetadata) OwnedFiles() (result []string) { +func (m ApkDBEntry) OwnedFiles() (result []string) { s := strset.New() for _, f := range m.Files { if f.Path != "" { diff --git a/vendor/github.com/anchore/syft/syft/pkg/binary_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/binary_signature.go similarity index 56% rename from vendor/github.com/anchore/syft/syft/pkg/binary_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/binary_signature.go index a915acc5..ddbe4cff 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/binary_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/binary_signature.go @@ -2,10 +2,12 @@ package pkg import "github.com/anchore/syft/syft/file" -type BinaryMetadata struct { +// BinarySignature represents a set of matched values within a binary file. +type BinarySignature struct { Matches []ClassifierMatch `mapstructure:"Matches" json:"matches"` } +// ClassifierMatch represents a single matched value within a binary file and the "class" name the search pattern represents. 
type ClassifierMatch struct { Classifier string `mapstructure:"Classifier" json:"classifier"` Location file.Location `mapstructure:"Location" json:"location"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/catalog.go b/vendor/github.com/anchore/syft/syft/pkg/catalog.go index 2120dfdf..1bca0fb4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/catalog.go +++ b/vendor/github.com/anchore/syft/syft/pkg/catalog.go @@ -130,9 +130,9 @@ func (c *Collection) addPathsToIndex(p Package) { c.addPathToIndex(p.id, l.RealPath) observedPaths.Add(l.RealPath) } - if l.VirtualPath != "" && l.RealPath != l.VirtualPath && !observedPaths.Has(l.VirtualPath) { - c.addPathToIndex(p.id, l.VirtualPath) - observedPaths.Add(l.VirtualPath) + if l.AccessPath != "" && l.RealPath != l.AccessPath && !observedPaths.Has(l.AccessPath) { + c.addPathToIndex(p.id, l.AccessPath) + observedPaths.Add(l.AccessPath) } } } @@ -179,9 +179,9 @@ func (c *Collection) deletePathsFromIndex(p Package) { c.deletePathFromIndex(p.id, l.RealPath) observedPaths.Add(l.RealPath) } - if l.VirtualPath != "" && l.RealPath != l.VirtualPath && !observedPaths.Has(l.VirtualPath) { - c.deletePathFromIndex(p.id, l.VirtualPath) - observedPaths.Add(l.VirtualPath) + if l.AccessPath != "" && l.RealPath != l.AccessPath && !observedPaths.Has(l.AccessPath) { + c.deletePathFromIndex(p.id, l.AccessPath) + observedPaths.Add(l.AccessPath) } } } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/cataloger.go new file mode 100644 index 00000000..5ff537f2 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/cataloger.go @@ -0,0 +1,15 @@ +/* +Package alpine provides a concrete Cataloger implementations for packages relating to the Alpine linux distribution. +*/ +package alpine + +import ( + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +// NewDBCataloger returns a new cataloger object initialized for Alpine package DB flat-file stores. +func NewDBCataloger() *generic.Cataloger { + return generic.NewCataloger("apk-db-cataloger"). 
+ WithParserByGlobs(parseApkDB, pkg.ApkDBGlob) +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/package.go similarity index 69% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/package.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/package.go index 8cb75bbc..69c8a8d1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/package.go @@ -1,4 +1,4 @@ -package apkdb +package alpine import ( "strings" @@ -20,14 +20,13 @@ func newPackage(d parsedData, release *linux.Release, dbLocation file.Location) } p := pkg.Package{ - Name: d.Package, - Version: d.Version, - Locations: file.NewLocationSet(dbLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(dbLocation, licenseStrings...)...), - PURL: packageURL(d.ApkMetadata, release), - Type: pkg.ApkPkg, - MetadataType: pkg.ApkMetadataType, - Metadata: d.ApkMetadata, + Name: d.Package, + Version: d.Version, + Locations: file.NewLocationSet(dbLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(dbLocation, licenseStrings...)...), + PURL: packageURL(d.ApkDBEntry, release), + Type: pkg.ApkPkg, + Metadata: d.ApkDBEntry, } p.SetID() @@ -36,7 +35,7 @@ func newPackage(d parsedData, release *linux.Release, dbLocation file.Location) } // packageURL returns the PURL for the specific Alpine package (see https://github.com/package-url/purl-spec) -func packageURL(m pkg.ApkMetadata, distro *linux.Release) string { +func packageURL(m pkg.ApkDBEntry, distro *linux.Release) string { if distro == nil { return "" } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/parse_apk_db.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/parse_apk_db.go similarity index 98% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/parse_apk_db.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/parse_apk_db.go index fd4184f8..8f0ba9bc 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/parse_apk_db.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpine/parse_apk_db.go @@ -1,4 +1,4 @@ -package apkdb +package alpine import ( "bufio" @@ -27,10 +27,10 @@ var ( type parsedData struct { License string `mapstructure:"L" json:"license"` - pkg.ApkMetadata + pkg.ApkDBEntry } -// parseApkDB parses packages from a given APK installed DB file. For more +// parseApkDB parses packages from a given APK "installed" flat-file DB. For more // information on specific fields, see https://wiki.alpinelinux.org/wiki/Apk_spec. 
// //nolint:funlen,gocognit @@ -390,7 +390,7 @@ func discoverPackageDependencies(pkgs []pkg.Package) (relationships []artifact.R lookup := make(map[string][]pkg.Package) // read "Provides" (p) and add as keys for lookup keys as well as package names for _, p := range pkgs { - apkg, ok := p.Metadata.(pkg.ApkMetadata) + apkg, ok := p.Metadata.(pkg.ApkDBEntry) if !ok { log.Warnf("cataloger failed to extract apk 'provides' metadata for package %+v", p.Name) continue @@ -404,7 +404,7 @@ func discoverPackageDependencies(pkgs []pkg.Package) (relationships []artifact.R // read "Pull Dependencies" (D) and match with keys for _, p := range pkgs { - apkg, ok := p.Metadata.(pkg.ApkMetadata) + apkg, ok := p.Metadata.(pkg.ApkDBEntry) if !ok { log.Warnf("cataloger failed to extract apk dependency metadata for package %+v", p.Name) continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/cataloger.go deleted file mode 100644 index 39bc7d81..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/cataloger.go +++ /dev/null @@ -1,13 +0,0 @@ -package alpm - -import ( - "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/pkg/cataloger/generic" -) - -const catalogerName = "alpmdb-cataloger" - -func NewAlpmdbCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). - WithParserByGlobs(parseAlpmDB, pkg.AlpmDBGlob) -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/cataloger.go deleted file mode 100644 index d3cea712..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/apkdb/cataloger.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Package apkdb provides a concrete Cataloger implementation for Alpine DB files. -*/ -package apkdb - -import ( - "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/pkg/cataloger/generic" -) - -const catalogerName = "apkdb-cataloger" - -// NewApkdbCataloger returns a new Alpine DB cataloger object. -func NewApkdbCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). - WithParserByGlobs(parseApkDB, pkg.ApkDBGlob) -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/cataloger.go new file mode 100644 index 00000000..0c00a4e2 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/cataloger.go @@ -0,0 +1,15 @@ +/* +Package arch provides a concrete Cataloger implementations for packages relating to the Arch linux distribution. +*/ +package arch + +import ( + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +// NewDBCataloger returns a new cataloger object initialized for arch linux pacman database flat-file stores. +func NewDBCataloger() *generic.Cataloger { + return generic.NewCataloger("alpm-db-cataloger"). 
+ WithParserByGlobs(parseAlpmDB, pkg.AlpmDBGlob) +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/package.go similarity index 73% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/package.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/package.go index 4ce9bd6b..f20a192f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/package.go @@ -1,4 +1,4 @@ -package alpm +package arch import ( "strings" @@ -13,14 +13,13 @@ func newPackage(m *parsedData, release *linux.Release, dbLocation file.Location) licenseCandidates := strings.Split(m.Licenses, "\n") p := pkg.Package{ - Name: m.Package, - Version: m.Version, - Locations: file.NewLocationSet(dbLocation), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(dbLocation.WithoutAnnotations(), licenseCandidates...)...), - Type: pkg.AlpmPkg, - PURL: packageURL(m, release), - MetadataType: pkg.AlpmMetadataType, - Metadata: m.AlpmMetadata, + Name: m.Package, + Version: m.Version, + Locations: file.NewLocationSet(dbLocation), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(dbLocation.WithoutAnnotations(), licenseCandidates...)...), + Type: pkg.AlpmPkg, + PURL: packageURL(m, release), + Metadata: m.AlpmDBEntry, } p.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/parse_alpm_db.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/parse_alpm_db.go similarity index 96% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/parse_alpm_db.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/parse_alpm_db.go index 86c6dd3d..53def263 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/alpm/parse_alpm_db.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/arch/parse_alpm_db.go @@ -1,4 +1,4 @@ -package alpm +package arch import ( "bufio" @@ -31,10 +31,11 @@ var ( ) type parsedData struct { - Licenses string `mapstructure:"license"` - pkg.AlpmMetadata `mapstructure:",squash"` + Licenses string `mapstructure:"license"` + pkg.AlpmDBEntry `mapstructure:",squash"` } +// parseAlpmDB parses the arch linux pacman database flat-files and returns the packages and relationships found within. func parseAlpmDB(resolver file.Resolver, env *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { data, err := parseAlpmDBEntry(reader) if err != nil { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/cataloger.go index 0cf04b72..67bb9b44 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/cataloger.go @@ -1,3 +1,6 @@ +/* +Package binary provides a concrete Cataloger implementations for surfacing possible packages based on signatures found within binary files. +*/ package binary import ( @@ -61,8 +64,8 @@ func mergePackages(target *pkg.Package, extra *pkg.Package) { // add the locations target.Locations.Add(extra.Locations.ToSlice()...) 
// update the metadata to indicate which classifiers were used - meta, _ := target.Metadata.(pkg.BinaryMetadata) - if m, ok := extra.Metadata.(pkg.BinaryMetadata); ok { + meta, _ := target.Metadata.(pkg.BinarySignature) + if m, ok := extra.Metadata.(pkg.BinarySignature); ok { meta.Matches = append(meta.Matches, m.Matches...) } target.Metadata = meta diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/classifier.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/classifier.go index c98399f4..b06f38fe 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/classifier.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/classifier.go @@ -162,8 +162,8 @@ func sharedLibraryLookup(sharedLibraryPattern string, sharedLibraryMatcher evide locationSet := file.NewLocationSet(location) locationSet.Add(p.Locations.ToSlice()...) p.Locations = locationSet - meta, _ := p.Metadata.(pkg.BinaryMetadata) - p.Metadata = pkg.BinaryMetadata{ + meta, _ := p.Metadata.(pkg.BinarySignature) + p.Metadata = pkg.BinarySignature{ Matches: append([]pkg.ClassifierMatch{ { Classifier: classifier.Class, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/package.go index a677b02a..9b3f1e22 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/package.go @@ -29,11 +29,10 @@ func newPackage(classifier classifier, location file.Location, matchMetadata map Locations: file.NewLocationSet( location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), - Type: pkg.BinaryPkg, - CPEs: cpes, - FoundBy: catalogerName, - MetadataType: pkg.BinaryMetadataType, - Metadata: pkg.BinaryMetadata{ + Type: pkg.BinaryPkg, + CPEs: cpes, + FoundBy: catalogerName, + Metadata: pkg.BinarySignature{ Matches: []pkg.ClassifierMatch{ { Classifier: classifier.Class, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go index c02c7e23..a66a0569 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go @@ -10,15 +10,16 @@ import ( "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/pkg/cataloger/alpm" - "github.com/anchore/syft/syft/pkg/cataloger/apkdb" + "github.com/anchore/syft/syft/pkg/cataloger/alpine" + "github.com/anchore/syft/syft/pkg/cataloger/arch" "github.com/anchore/syft/syft/pkg/cataloger/binary" "github.com/anchore/syft/syft/pkg/cataloger/cpp" "github.com/anchore/syft/syft/pkg/cataloger/dart" - "github.com/anchore/syft/syft/pkg/cataloger/deb" + "github.com/anchore/syft/syft/pkg/cataloger/debian" "github.com/anchore/syft/syft/pkg/cataloger/dotnet" "github.com/anchore/syft/syft/pkg/cataloger/elixir" "github.com/anchore/syft/syft/pkg/cataloger/erlang" + "github.com/anchore/syft/syft/pkg/cataloger/gentoo" "github.com/anchore/syft/syft/pkg/cataloger/githubactions" "github.com/anchore/syft/syft/pkg/cataloger/golang" "github.com/anchore/syft/syft/pkg/cataloger/haskell" @@ -27,10 +28,9 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/kernel" "github.com/anchore/syft/syft/pkg/cataloger/nix" "github.com/anchore/syft/syft/pkg/cataloger/php" - "github.com/anchore/syft/syft/pkg/cataloger/portage" "github.com/anchore/syft/syft/pkg/cataloger/python" 
"github.com/anchore/syft/syft/pkg/cataloger/r" - "github.com/anchore/syft/syft/pkg/cataloger/rpm" + "github.com/anchore/syft/syft/pkg/cataloger/redhat" "github.com/anchore/syft/syft/pkg/cataloger/ruby" "github.com/anchore/syft/syft/pkg/cataloger/rust" "github.com/anchore/syft/syft/pkg/cataloger/sbom" @@ -42,59 +42,61 @@ const AllCatalogersPattern = "all" // ImageCatalogers returns a slice of locally implemented catalogers that are fit for detecting installations of packages. func ImageCatalogers(cfg Config) []pkg.Cataloger { return filterCatalogers([]pkg.Cataloger{ - alpm.NewAlpmdbCataloger(), - apkdb.NewApkdbCataloger(), + arch.NewDBCataloger(), + alpine.NewDBCataloger(), binary.NewCataloger(), - deb.NewDpkgdbCataloger(), + cpp.NewConanInfoCataloger(), + debian.NewDBCataloger(), dotnet.NewDotnetPortableExecutableCataloger(), golang.NewGoModuleBinaryCataloger(cfg.Golang), - java.NewJavaCataloger(cfg.Java()), + java.NewArchiveCataloger(cfg.JavaConfig()), java.NewNativeImageCataloger(), javascript.NewPackageCataloger(), nix.NewStoreCataloger(), php.NewComposerInstalledCataloger(), - portage.NewPortageCataloger(), - python.NewPythonPackageCataloger(), + gentoo.NewPortageCataloger(), + python.NewInstalledPackageCataloger(), r.NewPackageCataloger(), - rpm.NewRpmDBCataloger(), - ruby.NewGemSpecCataloger(), - sbom.NewSBOMCataloger(), + redhat.NewDBCataloger(), + ruby.NewInstalledGemSpecCataloger(), + sbom.NewCataloger(), }, cfg.Catalogers) } // DirectoryCatalogers returns a slice of locally implemented catalogers that are fit for detecting packages from index files (and select installations) func DirectoryCatalogers(cfg Config) []pkg.Cataloger { return filterCatalogers([]pkg.Cataloger{ - alpm.NewAlpmdbCataloger(), - apkdb.NewApkdbCataloger(), + arch.NewDBCataloger(), + alpine.NewDBCataloger(), binary.NewCataloger(), cpp.NewConanCataloger(), dart.NewPubspecLockCataloger(), - deb.NewDpkgdbCataloger(), + debian.NewDBCataloger(), dotnet.NewDotnetDepsCataloger(), dotnet.NewDotnetPortableExecutableCataloger(), elixir.NewMixLockCataloger(), erlang.NewRebarLockCataloger(), githubactions.NewActionUsageCataloger(), githubactions.NewWorkflowUsageCataloger(), - golang.NewGoModFileCataloger(cfg.Golang), + golang.NewGoModuleFileCataloger(cfg.Golang), golang.NewGoModuleBinaryCataloger(cfg.Golang), haskell.NewHackageCataloger(), - java.NewJavaCataloger(cfg.Java()), - java.NewJavaGradleLockfileCataloger(), - java.NewJavaPomCataloger(), + java.NewArchiveCataloger(cfg.JavaConfig()), + java.NewGradleLockfileCataloger(), + java.NewPomCataloger(), java.NewNativeImageCataloger(), javascript.NewLockCataloger(), nix.NewStoreCataloger(), php.NewComposerLockCataloger(), - portage.NewPortageCataloger(), - python.NewPythonIndexCataloger(cfg.Python), - python.NewPythonPackageCataloger(), - rpm.NewFileCataloger(), - rpm.NewRpmDBCataloger(), + gentoo.NewPortageCataloger(), + python.NewPackageCataloger(cfg.Python), + python.NewInstalledPackageCataloger(), + redhat.NewArchiveCataloger(), + redhat.NewDBCataloger(), ruby.NewGemFileLockCataloger(), + ruby.NewGemSpecCataloger(), rust.NewCargoLockCataloger(), - sbom.NewSBOMCataloger(), + sbom.NewCataloger(), swift.NewCocoapodsCataloger(), swift.NewSwiftPackageManagerCataloger(), }, cfg.Catalogers) @@ -103,24 +105,24 @@ func DirectoryCatalogers(cfg Config) []pkg.Cataloger { // AllCatalogers returns all implemented catalogers func AllCatalogers(cfg Config) []pkg.Cataloger { return filterCatalogers([]pkg.Cataloger{ - alpm.NewAlpmdbCataloger(), - apkdb.NewApkdbCataloger(), + 
arch.NewDBCataloger(), + alpine.NewDBCataloger(), binary.NewCataloger(), cpp.NewConanCataloger(), dart.NewPubspecLockCataloger(), - deb.NewDpkgdbCataloger(), + debian.NewDBCataloger(), dotnet.NewDotnetDepsCataloger(), dotnet.NewDotnetPortableExecutableCataloger(), elixir.NewMixLockCataloger(), erlang.NewRebarLockCataloger(), githubactions.NewActionUsageCataloger(), githubactions.NewWorkflowUsageCataloger(), - golang.NewGoModFileCataloger(cfg.Golang), + golang.NewGoModuleFileCataloger(cfg.Golang), golang.NewGoModuleBinaryCataloger(cfg.Golang), haskell.NewHackageCataloger(), - java.NewJavaCataloger(cfg.Java()), - java.NewJavaGradleLockfileCataloger(), - java.NewJavaPomCataloger(), + java.NewArchiveCataloger(cfg.JavaConfig()), + java.NewGradleLockfileCataloger(), + java.NewPomCataloger(), java.NewNativeImageCataloger(), javascript.NewLockCataloger(), javascript.NewPackageCataloger(), @@ -128,17 +130,18 @@ func AllCatalogers(cfg Config) []pkg.Cataloger { nix.NewStoreCataloger(), php.NewComposerInstalledCataloger(), php.NewComposerLockCataloger(), - portage.NewPortageCataloger(), - python.NewPythonIndexCataloger(cfg.Python), - python.NewPythonPackageCataloger(), + gentoo.NewPortageCataloger(), + python.NewPackageCataloger(cfg.Python), + python.NewInstalledPackageCataloger(), r.NewPackageCataloger(), - rpm.NewFileCataloger(), - rpm.NewRpmDBCataloger(), + redhat.NewArchiveCataloger(), + redhat.NewDBCataloger(), ruby.NewGemFileLockCataloger(), ruby.NewGemSpecCataloger(), + ruby.NewInstalledGemSpecCataloger(), rust.NewAuditBinaryCataloger(), rust.NewCargoLockCataloger(), - sbom.NewSBOMCataloger(), + sbom.NewCataloger(), swift.NewCocoapodsCataloger(), swift.NewSwiftPackageManagerCataloger(), }, cfg.Catalogers) diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/apk.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/apk.go index 2394c7d4..466532dd 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/apk.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/apk.go @@ -22,7 +22,7 @@ type upstreamCandidate struct { Type pkg.Type } -func upstreamCandidates(m pkg.ApkMetadata) (candidates []upstreamCandidate) { +func upstreamCandidates(m pkg.ApkDBEntry) (candidates []upstreamCandidate) { // Do not consider OriginPackage variations when generating CPE candidates for the child package // because doing so will result in false positives when matching to vulnerabilities in Grype since // it won't know to lookup apk fix entries using the OriginPackage name. 
@@ -60,7 +60,7 @@ func upstreamCandidates(m pkg.ApkMetadata) (candidates []upstreamCandidate) { } func candidateVendorsForAPK(p pkg.Package) fieldCandidateSet { - metadata, ok := p.Metadata.(pkg.ApkMetadata) + metadata, ok := p.Metadata.(pkg.ApkDBEntry) if !ok { return nil } @@ -101,7 +101,7 @@ func candidateVendorsForAPK(p pkg.Package) fieldCandidateSet { } func candidateProductsForAPK(p pkg.Package) fieldCandidateSet { - metadata, ok := p.Metadata.(pkg.ApkMetadata) + metadata, ok := p.Metadata.(pkg.ApkDBEntry) if !ok { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json index 4d021dc1..e9eee86f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json @@ -143,6 +143,7 @@ "kanboard": "cpe:2.3:a:jenkins:kanboard:*:*:*:*:*:jenkins:*:*", "klocwork": "cpe:2.3:a:jenkins:klocwork_analysis:*:*:*:*:*:jenkins:*:*", "kubernetes": "cpe:2.3:a:jenkins:kubernetes:*:*:*:*:*:jenkins:*:*", + "kubernetes-cd": "cpe:2.3:a:jenkins:kubernetes_continuous_deploy:*:*:*:*:*:jenkins:*:*", "kubernetes-ci": "cpe:2.3:a:jenkins:kubernetes_ci:*:*:*:*:*:jenkins:*:*", "kubernetes-pipeline": "cpe:2.3:a:jenkins:kubernetes_pipeline:*:*:*:*:*:jenkins:*:*", "libvirt-slave": "cpe:2.3:a:jenkins:libvirt_slaves:*:*:*:*:*:jenkins:*:*", @@ -428,6 +429,8 @@ "decompress": "cpe:2.3:a:decompress_project:decompress:*:*:*:*:*:node.js:*:*", "deep-extend": "cpe:2.3:a:deep_extend_project:deep_extend:*:*:*:*:*:node.js:*:*", "deep-get-set": "cpe:2.3:a:deep-get-set_project:deep-get-set:*:*:*:*:*:node.js:*:*", + "deep-object-diff": "cpe:2.3:a:deep-object-diff_project:deep-object-diff:*:*:*:*:*:node.js:*:*", + "deep-parse-json": "cpe:2.3:a:deep-parse-json_project:deep-parse-json:*:*:*:*:*:node.js:*:*", "deep-set": "cpe:2.3:a:deep-set_project:deep-set:*:*:*:*:*:node.js:*:*", "deep.assign": "cpe:2.3:a:deep.assign_project:deep.assign:*:*:*:*:*:node.js:*:*", "deeply": "cpe:2.3:a:deeply_project:deeply:*:*:*:*:*:node.js:*:*", @@ -822,6 +825,7 @@ "qs": "cpe:2.3:a:qs_project:qs:*:*:*:*:*:node.js:*:*", "query-mysql": "cpe:2.3:a:query-mysql_project:query-mysql:*:*:*:*:*:node.js:*:*", "quickserver": "cpe:2.3:a:quickserver_project:quickserver:*:*:*:*:*:node.js:*:*", + "quill-mention": "cpe:2.3:a:quill-mention:quill_mention:*:*:*:*:*:node.js:*:*", "randomatic": "cpe:2.3:a:randomatic_project:randomatic:*:*:*:*:*:node.js:*:*", "rdf-graph-array": "cpe:2.3:a:rdf-graph-array_project:rdf-graph-array:*:*:*:*:*:node.js:*:*", "react-adal": "cpe:2.3:a:react-adal_project:react-adal:*:*:*:*:*:node.js:*:*", @@ -898,6 +902,7 @@ "shout": "cpe:2.3:a:shout_project:shout:*:*:*:*:*:node.js:*:*", "simple-markdown": "cpe:2.3:a:khanacademy:simple-markdown:*:*:*:*:*:node.js:*:*", "simple-npm-registry": "cpe:2.3:a:simple-npm-registry_project:simple-npm-registry:*:*:*:*:*:node.js:*:*", + "simple-plist": "cpe:2.3:a:simple-plist_project:simple-plist:*:*:*:*:*:*:*:*", "simplehttpserver": "cpe:2.3:a:simplehttpserver_project:simplehttpserver:*:*:*:*:*:node.js:*:*", "slimerjs-edge": "cpe:2.3:a:slimerjs-edge_project:slimerjs-edge:*:*:*:*:*:node.js:*:*", "slug": "cpe:2.3:a:slug_project:slug:*:*:*:*:*:node.js:*:*", @@ -1027,6 +1032,8 @@ "XML2Dict": "cpe:2.3:a:xml2dict_project:xml2dict:*:*:*:*:*:python:*:*", "aniso8601": "cpe:2.3:a:aniso8601_project:aniso8601:*:*:*:*:*:*:*:*", "api-res-py": 
"cpe:2.3:a:api-res-py_project:api-res-py:*:*:*:*:*:python:*:*", + "asyncua": "cpe:2.3:a:freeopcua:opcua-asyncio:*:*:*:*:*:python:*:*", + "cloudlabeling": "cpe:2.3:a:pypi:cloudlabeling:*:*:*:*:*:pypi:*:*", "cloudtoken": "cpe:2.3:a:atlassian:cloudtoken:*:*:*:*:*:*:*:*", "conference-scheduler-cli": "cpe:2.3:a:pyconuk:conference-scheduler-cli:*:*:*:*:*:*:*:*", "cryptography": "cpe:2.3:a:python-cryptography_project:python-cryptography:*:*:*:*:*:*:*:*", @@ -1037,6 +1044,7 @@ "d8s-uuids": "cpe:2.3:a:democritus_uuids_project:democritus_uuids:*:*:*:*:*:python:*:*", "decorator": "cpe:2.3:a:python:decorator:*:*:*:*:*:*:*:*", "drf-jwt": "cpe:2.3:a:styria:django-rest-framework-json_web_tokens:*:*:*:*:*:*:*:*", + "drxhello": "cpe:2.3:a:pypi:drxhello:*:*:*:*:*:pypi:*:*", "easy-parse": "cpe:2.3:a:easy-parse_project:easy-parse:*:*:*:*:*:python:*:*", "easy-xml": "cpe:2.3:a:easyxml_project:easyxml:*:*:*:*:*:python:*:*", "enum34": "cpe:2.3:a:python:enum34:*:*:*:*:*:*:*:*", @@ -1049,6 +1057,7 @@ "jw.util": "cpe:2.3:a:python:jw.util:*:*:*:*:*:python:*:*", "keymaker": "cpe:2.3:a:keymaker_project:keymaker:*:*:*:*:*:*:*:*", "ladon": "cpe:2.3:a:ladon_project:ladon:*:*:*:*:*:*:*:*", + "langchain-experimental": "cpe:2.3:a:langchain:langchain_experimental:*:*:*:*:*:python:*:*", "marshmallow": "cpe:2.3:a:marshmallow_project:marshmallow:*:*:*:*:*:python:*:*", "mpxj": "cpe:2.3:a:mpxj:mpxj:*:*:*:*:*:python:*:*", "networkx": "cpe:2.3:a:python:networkx:*:*:*:*:*:*:*:*", @@ -1058,6 +1067,7 @@ "ovirt-engine-sdk-python": "cpe:2.3:a:ovirt-engine-sdk-python_project:ovirt-engine-sdk-python:*:*:*:*:*:*:*:*", "passeo": "cpe:2.3:a:passeo_project:passeo:*:*:*:*:*:python:*:*", "pipreqs": "cpe:2.3:a:pipreqs_project:pipreqs:*:*:*:*:*:python:*:*", + "plone.namedfile": "cpe:2.3:a:plone:namedfile:*:*:*:*:*:*:*:*", "proxy.py": "cpe:2.3:a:proxy.py_project:proxy.py:*:*:*:*:*:*:*:*", "py-bcrypt": "cpe:2.3:a:python:py-bcrypt:*:*:*:*:*:*:*:*", "py-xml": "cpe:2.3:a:py-xml_project:py-xml:*:*:*:*:*:python:*:*", @@ -1089,7 +1099,8 @@ "validators": "cpe:2.3:a:validators_project:validators:*:*:*:*:*:python:*:*", "vault-cli": "cpe:2.3:a:vault-cli_project:vault-cli:*:*:*:*:*:python:*:*", "wmagent": "cpe:2.3:a:wmagent_project:wmagent:*:*:*:*:*:python:*:*", - "xmpp-http-upload": "cpe:2.3:a:xmpp-http-upload_project:xmpp-http-upload:*:*:*:*:*:*:*:*" + "xmpp-http-upload": "cpe:2.3:a:xmpp-http-upload_project:xmpp-http-upload:*:*:*:*:*:*:*:*", + "zibalPlatform": "cpe:2.3:a:zibal_project:zibal:*:*:*:*:*:pypi:*:*" }, "rubygems": { "Arabic-Prawn": "cpe:2.3:a:dynamixsolutions:arabic_prawn:*:*:*:*:*:ruby:*:*", @@ -1288,6 +1299,7 @@ "truetype": "cpe:2.3:a:truetype_project:truetype:*:*:*:*:*:rust:*:*", "trust-dns-proto": "cpe:2.3:a:trust-dns-proto_project:trust-dns-proto:*:*:*:*:*:*:*:*", "try-mutex": "cpe:2.3:a:try-mutex_project:try-mutex:*:*:*:*:*:rust:*:*", + "tungstenite": "cpe:2.3:a:snapview:tungstenite:*:*:*:*:*:rust:*:*", "unicycle": "cpe:2.3:a:unicycle_project:unicycle:*:*:*:*:*:rust:*:*", "uu_od": "cpe:2.3:a:uu_od_project:uu_od:*:*:*:*:*:rust:*:*", "v9": "cpe:2.3:a:v9_project:v9:*:*:*:*:*:rust:*:*", diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go index 8d007cca..2077d7e3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go @@ -168,18 +168,18 @@ func candidateVendors(p pkg.Package) []string { } } - switch p.MetadataType { - case 
pkg.RpmMetadataType: + switch p.Metadata.(type) { + case pkg.RpmDBEntry: vendors.union(candidateVendorsForRPM(p)) - case pkg.GemMetadataType: + case pkg.RubyGemspec: vendors.union(candidateVendorsForRuby(p)) - case pkg.PythonPackageMetadataType: + case pkg.PythonPackage: vendors.union(candidateVendorsForPython(p)) - case pkg.JavaMetadataType: + case pkg.JavaArchive: vendors.union(candidateVendorsForJava(p)) - case pkg.ApkMetadataType: + case pkg.ApkDBEntry: vendors.union(candidateVendorsForAPK(p)) - case pkg.NpmPackageJSONMetadataType: + case pkg.NpmPackage: vendors.union(candidateVendorsForJavascript(p)) } @@ -217,12 +217,14 @@ func candidateVendors(p pkg.Package) []string { func candidateProducts(p pkg.Package) []string { products := newFieldCandidateSet(p.Name) + _, hasJavaMetadata := p.Metadata.(pkg.JavaArchive) + switch { case p.Language == pkg.Python: if !strings.HasPrefix(p.Name, "python") { products.addValue("python-" + p.Name) } - case p.Language == pkg.Java || p.MetadataType == pkg.JavaMetadataType: + case p.Language == pkg.Java || hasJavaMetadata: products.addValue(candidateProductsForJava(p)...) case p.Language == pkg.Go: // replace all candidates with only the golang-specific helper @@ -234,7 +236,7 @@ func candidateProducts(p pkg.Package) []string { } } - if p.MetadataType == pkg.ApkMetadataType { + if _, hasAPKMetadata := p.Metadata.(pkg.ApkDBEntry); hasAPKMetadata { products.union(candidateProductsForAPK(p)) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go index c8bde9f7..7920e38b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go @@ -55,7 +55,7 @@ func candidateVendorsForJava(p pkg.Package) fieldCandidateSet { func vendorsFromJavaManifestNames(p pkg.Package) fieldCandidateSet { vendors := newFieldCandidateSet() - metadata, ok := p.Metadata.(pkg.JavaMetadata) + metadata, ok := p.Metadata.(pkg.JavaArchive) if !ok { return vendors } @@ -159,7 +159,7 @@ func productsFromArtifactAndGroupIDs(artifactID string, groupIDs []string) []str } func artifactIDFromJavaPackage(p pkg.Package) string { - metadata, ok := p.Metadata.(pkg.JavaMetadata) + metadata, ok := p.Metadata.(pkg.JavaArchive) if !ok { return "" } @@ -177,7 +177,7 @@ func artifactIDFromJavaPackage(p pkg.Package) string { } func GroupIDsFromJavaPackage(p pkg.Package) (groupIDs []string) { - metadata, ok := p.Metadata.(pkg.JavaMetadata) + metadata, ok := p.Metadata.(pkg.JavaArchive) if !ok { return nil } @@ -188,7 +188,7 @@ func GroupIDsFromJavaPackage(p pkg.Package) (groupIDs []string) { // GroupIDsFromJavaMetadata returns the possible group IDs for a Java package // This function is similar to GroupIDFromJavaPackage, but returns all possible group IDs and is less strict // It is used as a way to generate possible candidates for CPE matching. -func GroupIDsFromJavaMetadata(pkgName string, metadata pkg.JavaMetadata) (groupIDs []string) { +func GroupIDsFromJavaMetadata(pkgName string, metadata pkg.JavaArchive) (groupIDs []string) { groupIDs = append(groupIDs, groupIDsFromPomProperties(metadata.PomProperties)...) groupIDs = append(groupIDs, groupIDsFromPomProject(metadata.PomProject)...) groupIDs = append(groupIDs, groupIDsFromJavaManifest(pkgName, metadata.Manifest)...) 
@@ -196,7 +196,7 @@ func GroupIDsFromJavaMetadata(pkgName string, metadata pkg.JavaMetadata) (groupI return groupIDs } -func groupIDsFromPomProperties(properties *pkg.PomProperties) (groupIDs []string) { +func groupIDsFromPomProperties(properties *pkg.JavaPomProperties) (groupIDs []string) { if properties == nil { return nil } @@ -214,7 +214,7 @@ func groupIDsFromPomProperties(properties *pkg.PomProperties) (groupIDs []string return groupIDs } -func groupIDsFromPomProject(project *pkg.PomProject) (groupIDs []string) { +func groupIDsFromPomProject(project *pkg.JavaPomProject) (groupIDs []string) { if project == nil { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go index 634b4dce..c908142b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go @@ -70,4 +70,10 @@ var DefaultArtifactIDToGroupID = map[string]string{ "spring-webflow": "org.springframework.webflow", "spring-webflux": "org.springframework", "spring-webmvc": "org.springframework", + "spring-velocity-support": "org.apache.velocity", + "velocity": "org.apache.velocity", + "velocity-engine-core": "org.apache.velocity", + "velocity-engine-parent": "org.apache.velocity", + "velocity-engine-scripting": "org.apache.velocity", + "velocity-tools": "org.apache.velocity", } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/javascript.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/javascript.go index b95e741c..881bc658 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/javascript.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/javascript.go @@ -3,12 +3,12 @@ package cpe import "github.com/anchore/syft/syft/pkg" func candidateVendorsForJavascript(p pkg.Package) fieldCandidateSet { - if p.MetadataType != pkg.NpmPackageJSONMetadataType { + if _, ok := p.Metadata.(pkg.NpmPackage); !ok { return nil } vendors := newFieldCandidateSet() - metadata, ok := p.Metadata.(pkg.NpmPackageJSONMetadata) + metadata, ok := p.Metadata.(pkg.NpmPackage) if !ok { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/python.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/python.go index 58fd2adb..ca529981 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/python.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/python.go @@ -16,7 +16,7 @@ func additionalVendorsForPython(v string) (vendors []string) { } func candidateVendorsForPython(p pkg.Package) fieldCandidateSet { - metadata, ok := p.Metadata.(pkg.PythonPackageMetadata) + metadata, ok := p.Metadata.(pkg.PythonPackage) if !ok { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/rpm.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/rpm.go index d8c31654..64b1babf 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/rpm.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/rpm.go @@ -3,7 +3,7 @@ package cpe import "github.com/anchore/syft/syft/pkg" func candidateVendorsForRPM(p pkg.Package) fieldCandidateSet { - metadata, ok := p.Metadata.(pkg.RpmMetadata) + metadata, ok := p.Metadata.(pkg.RpmDBEntry) if !ok { return nil } diff --git 
a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/ruby.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/ruby.go index 266a858e..90ed9406 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/ruby.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/ruby.go @@ -3,7 +3,7 @@ package cpe import "github.com/anchore/syft/syft/pkg" func candidateVendorsForRuby(p pkg.Package) fieldCandidateSet { - metadata, ok := p.Metadata.(pkg.GemMetadata) + metadata, ok := p.Metadata.(pkg.RubyGemspec) if !ok { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go index df3b6397..3eced5e0 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go @@ -13,6 +13,7 @@ type Config struct { Golang golang.GoCatalogerOpts LinuxKernel kernel.LinuxCatalogerConfig Python python.CatalogerConfig + Java java.CatalogerOpts Catalogers []string Parallelism int ExcludeBinaryOverlapByOwnership bool @@ -24,13 +25,20 @@ func DefaultConfig() Config { Parallelism: 1, LinuxKernel: kernel.DefaultLinuxCatalogerConfig(), Python: python.DefaultCatalogerConfig(), + Java: java.DefaultCatalogerOpts(), ExcludeBinaryOverlapByOwnership: true, } } -func (c Config) Java() java.Config { +// JavaConfig merges relevant config values from Config to return a java.Config struct. +// Values like IncludeUnindexedArchives and IncludeIndexedArchives are used across catalogers +// and are not specific to Java requiring this merge. +func (c Config) JavaConfig() java.Config { return java.Config{ SearchUnindexedArchives: c.Search.IncludeUnindexedArchives, SearchIndexedArchives: c.Search.IncludeIndexedArchives, + UseNetwork: c.Java.UseNetwork, + MavenBaseURL: c.Java.MavenURL, + MaxParentRecursiveDepth: c.Java.MaxParentRecursiveDepth, } } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/cataloger.go index 80c6b5b1..380206d3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/cataloger.go @@ -1,14 +1,21 @@ +/* +Package cpp provides a concrete Cataloger implementations for the C/C++ language ecosystem. +*/ package cpp import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "conan-cataloger" - -// NewConanCataloger returns a new C++ conanfile.txt and conan.lock cataloger object. +// NewConanCataloger returns a new C/C++ conanfile.txt and conan.lock cataloger object. func NewConanCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("conan-cataloger"). WithParserByGlobs(parseConanfile, "**/conanfile.txt"). WithParserByGlobs(parseConanlock, "**/conan.lock") } + +// NewConanInfoCataloger returns a new C/C++ conaninfo.txt cataloger object. +func NewConanInfoCataloger() *generic.Cataloger { + return generic.NewCataloger("conan-info-cataloger"). 
+ WithParserByGlobs(parseConaninfo, "**/conaninfo.txt") +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/package.go index b093c928..9a0b4be1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/package.go @@ -14,11 +14,12 @@ type conanRef struct { User string Channel string Revision string + PackageID string Timestamp string } func splitConanRef(ref string) *conanRef { - // Conan ref format is: + // ConanfileEntry ref format is: // pkg/0.1@user/channel#rrev%timestamp // This method is based on conan's ref.loads method: // https://github.com/conan-io/conan/blob/release/2.0/conans/model/recipe_ref.py#L93C21-L93C21 @@ -32,6 +33,13 @@ func splitConanRef(ref string) *conanRef { cref.Timestamp = tokens[1] } + // package_id + tokens = strings.Split(text, ":") + text = tokens[0] + if len(tokens) == 2 { + cref.PackageID = tokens[1] + } + // revision tokens = strings.Split(text, "#") ref = tokens[0] @@ -58,43 +66,32 @@ func splitConanRef(ref string) *conanRef { return &cref } -func newConanfilePackage(m pkg.ConanMetadata, locations ...file.Location) *pkg.Package { - ref := splitConanRef(m.Ref) - if ref == nil { - return nil - } - - p := pkg.Package{ - Name: ref.Name, - Version: ref.Version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(ref), - Language: pkg.CPP, - Type: pkg.ConanPkg, - MetadataType: pkg.ConanMetadataType, - Metadata: m, - } +func newConanfilePackage(m pkg.ConanfileEntry, locations ...file.Location) *pkg.Package { + return newConanPackage(m.Ref, m, locations...) +} - p.SetID() +func newConanlockPackage(m pkg.ConanLockEntry, locations ...file.Location) *pkg.Package { + return newConanPackage(m.Ref, m, locations...) +} - return &p +func newConaninfoPackage(m pkg.ConaninfoEntry, locations ...file.Location) *pkg.Package { + return newConanPackage(m.Ref, m, locations...) 
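The splitConanRef helper above now also carries a package ID for references of the form pkg/0.1@user/channel#rrev:package_id%timestamp, and the new conaninfo parser in the hunks that follow recovers the owning package from the conan cache path. A test-style sketch of that path parsing, assuming an invented cache path and package ID:

```go
package cpp

import "testing"

// Sketch of exercising parseConanMetadataFromFilePath (added in the new
// parse_conaninfo.go below); the cache path and package ID are made up.
func TestParseConanMetadataFromFilePath_sketch(t *testing.T) {
	entry, err := parseConanMetadataFromFilePath(
		"/home/dev/.conan/data/zlib/1.2.13/_/_/package/6af9cc7cb931c5ad942174fd7838eb655717c709/conaninfo.txt")
	if err != nil {
		t.Fatal(err)
	}
	if entry.Ref != "zlib/1.2.13@_/_" {
		t.Fatalf("unexpected ref: %q", entry.Ref)
	}
	if entry.PackageID != "6af9cc7cb931c5ad942174fd7838eb655717c709" {
		t.Fatalf("unexpected package id: %q", entry.PackageID)
	}
}
```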
} -func newConanlockPackage(m pkg.ConanLockMetadata, locations ...file.Location) *pkg.Package { - ref := splitConanRef(m.Ref) +func newConanPackage(refStr string, metadata any, locations ...file.Location) *pkg.Package { + ref := splitConanRef(refStr) if ref == nil { return nil } p := pkg.Package{ - Name: ref.Name, - Version: ref.Version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(ref), - Language: pkg.CPP, - Type: pkg.ConanPkg, - MetadataType: pkg.ConanLockMetadataType, - Metadata: m, + Name: ref.Name, + Version: ref.Version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(ref), + Language: pkg.CPP, + Type: pkg.ConanPkg, + Metadata: metadata, } p.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanfile.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanfile.go index bf60706d..131c6896 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanfile.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanfile.go @@ -40,7 +40,7 @@ func parseConanfile(_ file.Resolver, _ *generic.Environment, reader file.Locatio inRequirements = false } - m := pkg.ConanMetadata{ + m := pkg.ConanfileEntry{ Ref: strings.Trim(line, "\n"), } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conaninfo.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conaninfo.go new file mode 100644 index 00000000..b5327393 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conaninfo.go @@ -0,0 +1,141 @@ +package cpp + +import ( + "bufio" + "errors" + "fmt" + "io" + "regexp" + "strings" + + "github.com/anchore/syft/syft/artifact" + "github.com/anchore/syft/syft/file" + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +var _ generic.Parser = parseConaninfo + +func parseConanMetadataFromFilePath(path string) (pkg.ConaninfoEntry, error) { + // fullFilePath = str(reader.Location.AccessPath) + // Split the full path into the folders we expect. I.e.: + // $HOME/.conan/data/<name>/<version>/<user>/<channel>/package/<package_id>/conaninfo.txt + re := regexp.MustCompile(`.*[/\\](?P<name>[^/\\]+)[/\\](?P<version>[^/\\]+)[/\\](?P<user>[^/\\]+)[/\\](?P<channel>[^/\\]+)[/\\]package[/\\](?P<package_id>[^/\\]+)[/\\]conaninfo\.txt`) + matches := re.FindStringSubmatch(path) + if len(matches) != 6 { + return pkg.ConaninfoEntry{}, fmt.Errorf("failed to get parent package info from conaninfo file path") + } + mainPackageRef := fmt.Sprintf("%s/%s@%s/%s", matches[1], matches[2], matches[3], matches[4]) + return pkg.ConaninfoEntry{ + Ref: mainPackageRef, + PackageID: matches[5], + }, nil +} + +func getRelationships(pkgs []pkg.Package, mainPackageRef pkg.Package) []artifact.Relationship { + var relationships []artifact.Relationship + for _, p := range pkgs { + // this is a pkg that package "main_package" depends on... 
make a relationship + relationships = append(relationships, artifact.Relationship{ + From: p, + To: mainPackageRef, + Type: artifact.DependencyOfRelationship, + }) + } + return relationships +} + +func parseFullRequiresLine(line string, reader file.LocationReadCloser, pkgs *[]pkg.Package) { + if len(line) == 0 { + return + } + + cref := splitConanRef(line) + + meta := pkg.ConaninfoEntry{ + Ref: line, + PackageID: cref.PackageID, + } + + p := newConaninfoPackage( + meta, + reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + ) + if p != nil { + *pkgs = append(*pkgs, *p) + } +} + +// parseConaninfo is a parser function for conaninfo.txt contents, returning all packages discovered. +// The conaninfo.txt file is typically present for an installed conan package under: +// $HOME/.conan/data/////package//conaninfo.txt +// Based on the relative path we can get: +// - package name +// - package version +// - package id +// - user +// - channel +// The conaninfo.txt gives: +// - package requires (full_requires) +// - recipe revision (recipe_hash) +func parseConaninfo(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + // First set the base package info by checking the relative path + fullFilePath := string(reader.Location.LocationData.Reference().RealPath) + if len(fullFilePath) == 0 { + fullFilePath = reader.Location.LocationData.RealPath + } + + mainMetadata, err := parseConanMetadataFromFilePath(fullFilePath) + if err != nil { + return nil, nil, err + } + + r := bufio.NewReader(reader) + inRequirements := false + inRecipeHash := false + var pkgs []pkg.Package + + for { + line, err := r.ReadString('\n') + switch { + case errors.Is(io.EOF, err): + mainPackage := newConaninfoPackage( + mainMetadata, + reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + ) + + mainPackageRef := *mainPackage + relationships := getRelationships(pkgs, mainPackageRef) + + pkgs = append(pkgs, mainPackageRef) + + return pkgs, relationships, nil + case err != nil: + return nil, nil, fmt.Errorf("failed to parse conaninfo.txt file: %w", err) + } + + switch { + case strings.Contains(line, "[full_requires]"): + inRequirements = true + inRecipeHash = false + continue + case strings.Contains(line, "[recipe_hash]"): + inRequirements = false + inRecipeHash = true + continue + case strings.ContainsAny(line, "[]") || strings.HasPrefix(strings.TrimSpace(line), "#"): + inRequirements = false + inRecipeHash = false + continue + } + + if inRequirements { + parseFullRequiresLine(strings.Trim(line, "\n "), reader, &pkgs) + } + if inRecipeHash { + // add recipe hash to the metadata ref + mainMetadata.Ref = mainMetadata.Ref + "#" + strings.Trim(line, "\n ") + inRecipeHash = false + } + } +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanlock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanlock.go index ef1b3b1a..80a403ea 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanlock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cpp/parse_conanlock.go @@ -47,7 +47,7 @@ func parseConanlock(_ file.Resolver, _ *generic.Environment, reader file.Locatio var parsedPkgRequires = map[artifact.ID][]string{} for idx, node := range cl.GraphLock.Nodes { - metadata := pkg.ConanLockMetadata{ + metadata := pkg.ConanLockEntry{ Ref: node.Ref, Options: parseOptions(node.Options), Path: node.Path, diff --git 
a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/cataloger.go index 5fbff6f0..c8c1af42 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/cataloger.go @@ -1,13 +1,14 @@ +/* +Package dart provides a concrete Cataloger implementations for the Dart language ecosystem. +*/ package dart import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "dartlang-lock-cataloger" - // NewPubspecLockCataloger returns a new Dartlang cataloger object base on pubspec lock files. func NewPubspecLockCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("dart-pubspec-lock-cataloger"). WithParserByGlobs(parsePubspecLock, "**/pubspec.lock") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/package.go index f01d80f6..99fad413 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dart/package.go @@ -7,7 +7,7 @@ import ( ) func newPubspecLockPackage(name string, raw pubspecLockPackage, locations ...file.Location) pkg.Package { - metadata := pkg.DartPubMetadata{ + metadata := pkg.DartPubspecLockEntry{ Name: name, Version: raw.Version, HostedURL: raw.getHostedURL(), @@ -15,14 +15,13 @@ func newPubspecLockPackage(name string, raw pubspecLockPackage, locations ...fil } p := pkg.Package{ - Name: name, - Version: raw.Version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(metadata), - Language: pkg.Dart, - Type: pkg.DartPubPkg, - MetadataType: pkg.DartPubMetadataType, - Metadata: metadata, + Name: name, + Version: raw.Version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(metadata), + Language: pkg.Dart, + Type: pkg.DartPubPkg, + Metadata: metadata, } p.SetID() @@ -30,7 +29,7 @@ func newPubspecLockPackage(name string, raw pubspecLockPackage, locations ...fil return p } -func packageURL(m pkg.DartPubMetadata) string { +func packageURL(m pkg.DartPubspecLockEntry) string { var qualifiers packageurl.Qualifiers if m.HostedURL != "" { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/cataloger.go similarity index 53% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/cataloger.go index 946abae9..4efb3a02 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/cataloger.go @@ -1,17 +1,15 @@ /* -Package deb provides a concrete Cataloger implementation for Debian package DB status files. +Package debian provides a concrete Cataloger implementation relating to packages within the Debian linux distribution. */ -package deb +package debian import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "dpkgdb-cataloger" - -// NewDpkgdbCataloger returns a new Deb package cataloger capable of parsing DPKG status DB files. -func NewDpkgdbCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). +// NewDBCataloger returns a new Deb package cataloger capable of parsing DPKG status DB flat-file stores. 
+func NewDBCataloger() *generic.Cataloger { + return generic.NewCataloger("dpkg-db-cataloger"). // note: these globs have been intentionally split up in order to improve search performance, // please do NOT combine into: "**/var/lib/dpkg/{status,status.d/*}" WithParserByGlobs(parseDpkgDB, "**/var/lib/dpkg/status", "**/var/lib/dpkg/status.d/*", "**/lib/opkg/info/*.control", "**/lib/opkg/status") diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/package.go similarity index 87% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/package.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/package.go index eb9b551c..1d62851e 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/package.go @@ -1,4 +1,4 @@ -package deb +package debian import ( "fmt" @@ -22,18 +22,17 @@ const ( docsPath = "/usr/share/doc" ) -func newDpkgPackage(d pkg.DpkgMetadata, dbLocation file.Location, resolver file.Resolver, release *linux.Release) pkg.Package { +func newDpkgPackage(d pkg.DpkgDBEntry, dbLocation file.Location, resolver file.Resolver, release *linux.Release) pkg.Package { // TODO: separate pr to license refactor, but explore extracting dpkg-specific license parsing into a separate function licenses := make([]pkg.License, 0) p := pkg.Package{ - Name: d.Package, - Version: d.Version, - Licenses: pkg.NewLicenseSet(licenses...), - Locations: file.NewLocationSet(dbLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - PURL: packageURL(d, release), - Type: pkg.DebPkg, - MetadataType: pkg.DpkgMetadataType, - Metadata: d, + Name: d.Package, + Version: d.Version, + Licenses: pkg.NewLicenseSet(licenses...), + Locations: file.NewLocationSet(dbLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + PURL: packageURL(d, release), + Type: pkg.DebPkg, + Metadata: d, } if resolver != nil { @@ -52,7 +51,7 @@ func newDpkgPackage(d pkg.DpkgMetadata, dbLocation file.Location, resolver file. 
} // PackageURL returns the PURL for the specific Debian package (see https://github.com/package-url/purl-spec) -func packageURL(m pkg.DpkgMetadata, distro *linux.Release) string { +func packageURL(m pkg.DpkgDBEntry, distro *linux.Release) string { if distro == nil { return "" } @@ -87,7 +86,7 @@ func packageURL(m pkg.DpkgMetadata, distro *linux.Release) string { } func addLicenses(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) { - metadata, ok := p.Metadata.(pkg.DpkgMetadata) + metadata, ok := p.Metadata.(pkg.DpkgDBEntry) if !ok { log.WithFields("package", p).Warn("unable to extract DPKG metadata to add licenses") return @@ -97,7 +96,7 @@ func addLicenses(resolver file.Resolver, dbLocation file.Location, p *pkg.Packag copyrightReader, copyrightLocation := fetchCopyrightContents(resolver, dbLocation, metadata) if copyrightReader != nil && copyrightLocation != nil { - defer internal.CloseAndLogError(copyrightReader, copyrightLocation.VirtualPath) + defer internal.CloseAndLogError(copyrightReader, copyrightLocation.AccessPath) // attach the licenses licenseStrs := parseLicensesFromCopyright(copyrightReader) for _, licenseStr := range licenseStrs { @@ -109,7 +108,7 @@ func addLicenses(resolver file.Resolver, dbLocation file.Location, p *pkg.Packag } func mergeFileListing(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) { - metadata, ok := p.Metadata.(pkg.DpkgMetadata) + metadata, ok := p.Metadata.(pkg.DpkgDBEntry) if !ok { log.WithFields("package", p).Warn("unable to extract DPKG metadata to file listing") return @@ -140,7 +139,7 @@ loopNewFiles: p.Locations.Add(infoLocations...) } -func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgMetadata) ([]pkg.DpkgFileRecord, []file.Location) { +func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) ([]pkg.DpkgFileRecord, []file.Location) { // ensure the default value for a collection is never nil since this may be shown as JSON var files = make([]pkg.DpkgFileRecord, 0) var locations []file.Location @@ -148,7 +147,7 @@ func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location, md5Reader, md5Location := fetchMd5Contents(resolver, dbLocation, m) if md5Reader != nil && md5Location != nil { - defer internal.CloseAndLogError(md5Reader, md5Location.VirtualPath) + defer internal.CloseAndLogError(md5Reader, md5Location.AccessPath) // attach the file list files = append(files, parseDpkgMD5Info(md5Reader)...) @@ -159,7 +158,7 @@ func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location, conffilesReader, conffilesLocation := fetchConffileContents(resolver, dbLocation, m) if conffilesReader != nil && conffilesLocation != nil { - defer internal.CloseAndLogError(conffilesReader, conffilesLocation.VirtualPath) + defer internal.CloseAndLogError(conffilesReader, conffilesLocation.AccessPath) // attach the file list files = append(files, parseDpkgConffileInfo(conffilesReader)...) 
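With the rename from pkg.DpkgMetadata to pkg.DpkgDBEntry, downstream code reads the dpkg fields via a plain type assertion. A minimal sketch, assuming only the fields visible in the hunks above (Package, Version, Source):

```go
package example

import (
	"fmt"

	"github.com/anchore/syft/syft/pkg"
)

// printDpkgOrigin prints the source package recorded for a dpkg-cataloged
// package, if the package carries DpkgDBEntry metadata.
func printDpkgOrigin(p pkg.Package) {
	entry, ok := p.Metadata.(pkg.DpkgDBEntry)
	if !ok {
		return // not produced by the dpkg-db-cataloger
	}
	fmt.Printf("%s %s (source: %s)\n", entry.Package, entry.Version, entry.Source)
}
```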
@@ -171,7 +170,7 @@ func getAdditionalFileListing(resolver file.Resolver, dbLocation file.Location, } //nolint:dupl -func fetchMd5Contents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgMetadata) (io.ReadCloser, *file.Location) { +func fetchMd5Contents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) (io.ReadCloser, *file.Location) { var md5Reader io.ReadCloser var err error @@ -215,7 +214,7 @@ func fetchMd5Contents(resolver file.Resolver, dbLocation file.Location, m pkg.Dp } //nolint:dupl -func fetchConffileContents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgMetadata) (io.ReadCloser, *file.Location) { +func fetchConffileContents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) (io.ReadCloser, *file.Location) { var reader io.ReadCloser var err error @@ -250,7 +249,7 @@ func fetchConffileContents(resolver file.Resolver, dbLocation file.Location, m p return reader, &l } -func fetchCopyrightContents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgMetadata) (io.ReadCloser, *file.Location) { +func fetchCopyrightContents(resolver file.Resolver, dbLocation file.Location, m pkg.DpkgDBEntry) (io.ReadCloser, *file.Location) { if resolver == nil { return nil, nil } @@ -274,7 +273,7 @@ func fetchCopyrightContents(resolver file.Resolver, dbLocation file.Location, m return reader, &l } -func md5Key(metadata pkg.DpkgMetadata) string { +func md5Key(metadata pkg.DpkgDBEntry) string { contentKey := metadata.Package if metadata.Architecture != "" && metadata.Architecture != "all" { contentKey = contentKey + ":" + metadata.Architecture diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_copyright.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_copyright.go similarity index 99% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_copyright.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_copyright.go index 176e6aef..3f02131b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_copyright.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_copyright.go @@ -1,4 +1,4 @@ -package deb +package debian import ( "bufio" diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_db.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_db.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_db.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_db.go index 03c66c58..ab278184 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_db.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_db.go @@ -1,4 +1,4 @@ -package deb +package debian import ( "bufio" @@ -24,6 +24,7 @@ var ( sourceRegexp = regexp.MustCompile(`(?P\S+)( \((?P.*)\))?`) ) +// parseDpkgDB reads a dpkg database "status" file (and surrounding data files) and returns the packages and relationships found. func parseDpkgDB(resolver file.Resolver, env *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { metadata, err := parseDpkgStatus(reader) if err != nil { @@ -39,9 +40,9 @@ func parseDpkgDB(resolver file.Resolver, env *generic.Environment, reader file.L } // parseDpkgStatus is a parser function for Debian DB status contents, returning all Debian packages listed. 
-func parseDpkgStatus(reader io.Reader) ([]pkg.DpkgMetadata, error) { +func parseDpkgStatus(reader io.Reader) ([]pkg.DpkgDBEntry, error) { buffedReader := bufio.NewReader(reader) - var metadata []pkg.DpkgMetadata + var metadata []pkg.DpkgDBEntry continueProcessing := true for continueProcessing { @@ -80,7 +81,7 @@ type dpkgExtractedMetadata struct { } // parseDpkgStatusEntry returns an individual Dpkg entry, or returns errEndOfPackages if there are no more packages to parse from the reader. -func parseDpkgStatusEntry(reader *bufio.Reader) (*pkg.DpkgMetadata, error) { +func parseDpkgStatusEntry(reader *bufio.Reader) (*pkg.DpkgDBEntry, error) { var retErr error dpkgFields, err := extractAllFields(reader) if err != nil { @@ -109,7 +110,7 @@ func parseDpkgStatusEntry(reader *bufio.Reader) (*pkg.DpkgMetadata, error) { return nil, retErr } - entry := pkg.DpkgMetadata{ + entry := pkg.DpkgDBEntry{ Package: raw.Package, Source: raw.Source, Version: raw.Version, @@ -246,7 +247,7 @@ func associateRelationships(pkgs []pkg.Package) (relationships []artifact.Relati // read provided and add as keys for lookup keys as well as package names for _, p := range pkgs { - meta, ok := p.Metadata.(pkg.DpkgMetadata) + meta, ok := p.Metadata.(pkg.DpkgDBEntry) if !ok { log.Warnf("cataloger failed to extract dpkg 'provides' metadata for package %+v", p.Name) continue @@ -260,7 +261,7 @@ func associateRelationships(pkgs []pkg.Package) (relationships []artifact.Relati // read "Depends" and "Pre-Depends" and match with keys for _, p := range pkgs { - meta, ok := p.Metadata.(pkg.DpkgMetadata) + meta, ok := p.Metadata.(pkg.DpkgDBEntry) if !ok { log.Warnf("cataloger failed to extract dpkg 'dependency' metadata for package %+v", p.Name) continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_info_files.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_info_files.go similarity index 98% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_info_files.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_info_files.go index 105c47da..d9d6802b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/parse_dpkg_info_files.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/debian/parse_dpkg_info_files.go @@ -1,4 +1,4 @@ -package deb +package debian import ( "bufio" diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go index 938ccfba..85b20673 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go @@ -1,3 +1,6 @@ +/* +Package dotnet provides a concrete Cataloger implementation relating to packages within the C#/.NET language/runtime ecosystem. +*/ package dotnet import ( @@ -10,6 +13,7 @@ func NewDotnetDepsCataloger() *generic.Cataloger { WithParserByGlobs(parseDotnetDeps, "**/*.deps.json") } +// NewDotnetPortableExecutableCataloger returns a new Dotnet cataloger object base on portable executable files. func NewDotnetPortableExecutableCataloger() *generic.Cataloger { return generic.NewCataloger("dotnet-portable-executable-cataloger"). 
WithParserByGlobs(parseDotnetPortableExecutable, "**/*.dll", "**/*.exe") diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/package.go index a7b3b209..021fb745 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/package.go @@ -13,7 +13,7 @@ import ( func newDotnetDepsPackage(nameVersion string, lib dotnetDepsLibrary, locations ...file.Location) *pkg.Package { name, version := extractNameAndVersion(nameVersion) - m := pkg.DotnetDepsMetadata{ + m := pkg.DotnetDepsEntry{ Name: name, Version: version, Path: lib.Path, @@ -22,14 +22,13 @@ func newDotnetDepsPackage(nameVersion string, lib dotnetDepsLibrary, locations . } p := &pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(m), - Language: pkg.Dotnet, - Type: pkg.DotnetPkg, - MetadataType: pkg.DotnetDepsMetadataType, - Metadata: m, + Name: name, + Version: version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(m), + Language: pkg.Dotnet, + Type: pkg.DotnetPkg, + Metadata: m, } p.SetID() @@ -58,7 +57,7 @@ func createNameAndVersion(name, version string) (nameVersion string) { return } -func packageURL(m pkg.DotnetDepsMetadata) string { +func packageURL(m pkg.DotnetDepsEntry) string { var qualifiers packageurl.Qualifiers return packageurl.NewPackageURL( diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_deps.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_deps.go index e99811ac..fc768780 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_deps.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_deps.go @@ -14,6 +14,12 @@ import ( var _ generic.Parser = parseDotnetDeps +type dotnetDeps struct { + RuntimeTarget dotnetRuntimeTarget `json:"runtimeTarget"` + Targets map[string]map[string]dotnetDepsTarget `json:"targets"` + Libraries map[string]dotnetDepsLibrary `json:"libraries"` +} + type dotnetRuntimeTarget struct { Name string `json:"name"` } @@ -22,11 +28,6 @@ type dotnetDepsTarget struct { Dependencies map[string]string `json:"dependencies"` Runtime map[string]struct{} `json:"runtime"` } -type dotnetDeps struct { - RuntimeTarget dotnetRuntimeTarget `json:"runtimeTarget"` - Targets map[string]map[string]dotnetDepsTarget `json:"targets"` - Libraries map[string]dotnetDepsLibrary `json:"libraries"` -} type dotnetDepsLibrary struct { Type string `json:"type"` @@ -48,9 +49,9 @@ func parseDotnetDeps(_ file.Resolver, _ *generic.Environment, reader file.Locati return nil, nil, fmt.Errorf("failed to parse deps.json file: %w", err) } - rootName := getDepsJSONFilePrefix(reader.AccessPath()) + rootName := getDepsJSONFilePrefix(reader.Path()) if rootName == "" { - return nil, nil, fmt.Errorf("unable to determine root package name from deps.json file: %s", reader.AccessPath()) + return nil, nil, fmt.Errorf("unable to determine root package name from deps.json file: %s", reader.Path()) } var rootPkg *pkg.Package for nameVersion, lib := range depsDoc.Libraries { @@ -64,7 +65,7 @@ func parseDotnetDeps(_ file.Resolver, _ *generic.Environment, reader file.Locati } } if rootPkg == nil { - return nil, nil, fmt.Errorf("unable to determine root package from deps.json file: %s", reader.AccessPath()) + return nil, nil, fmt.Errorf("unable to determine root package from deps.json file: %s", 
reader.Path()) } pkgs = append(pkgs, *rootPkg) pkgMap[createNameAndVersion(rootPkg.Name, rootPkg.Version)] = *rootPkg diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go index b57b2f06..ca28d521 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go @@ -3,6 +3,8 @@ package dotnet import ( "fmt" "io" + "regexp" + "strings" "github.com/saferwall/pe" @@ -40,16 +42,26 @@ func parseDotnetPortableExecutable(_ file.Resolver, _ *generic.Environment, f fi return nil, nil, nil } - name := versionResources["FileDescription"] - if name == "" { - log.Tracef("unable to find FileDescription in PE file: %s", f.RealPath) + dotNetPkg, err := buildDotNetPackage(versionResources, f) + if err != nil { + // this is not a fatal error, just log and continue + // TODO: consider this case for "known unknowns" (same goes for cases below) + log.Tracef("unable to build dotnet package: %w", err) return nil, nil, nil } - version := versionResources["FileVersion"] - if version == "" { - log.Tracef("unable to find FileVersion in PE file: %s", f.RealPath) - return nil, nil, nil + return []pkg.Package{dotNetPkg}, nil, nil +} + +func buildDotNetPackage(versionResources map[string]string, f file.LocationReadCloser) (dnpkg pkg.Package, err error) { + name := findName(versionResources) + if name == "" { + return dnpkg, fmt.Errorf("unable to find FileDescription, or ProductName in PE file: %s", f.RealPath) + } + + version := findVersion(versionResources) + if strings.TrimSpace(version) == "" { + return dnpkg, fmt.Errorf("unable to find FileVersion in PE file: %s", f.RealPath) } purl := packageurl.NewPackageURL( @@ -61,7 +73,7 @@ func parseDotnetPortableExecutable(_ file.Resolver, _ *generic.Environment, f fi "", ).ToString() - metadata := pkg.DotnetPortableExecutableMetadata{ + metadata := pkg.DotnetPortableExecutableEntry{ AssemblyVersion: versionResources["Assembly Version"], LegalCopyright: versionResources["LegalCopyright"], Comments: versionResources["Comments"], @@ -71,17 +83,45 @@ func parseDotnetPortableExecutable(_ file.Resolver, _ *generic.Environment, f fi ProductVersion: versionResources["ProductVersion"], } - p := pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(f.Location), - Type: pkg.DotnetPkg, - PURL: purl, - MetadataType: pkg.DotnetPortableExecutableMetadataType, - Metadata: metadata, + dnpkg = pkg.Package{ + Name: name, + Version: version, + Locations: file.NewLocationSet(f.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Type: pkg.DotnetPkg, + Language: pkg.Dotnet, + PURL: purl, + Metadata: metadata, } - p.SetID() + dnpkg.SetID() + + return dnpkg, nil +} + +func findVersion(versionResources map[string]string) string { + for _, key := range []string{"FileVersion"} { + if version, ok := versionResources[key]; ok { + if strings.TrimSpace(version) == "" { + continue + } + fields := strings.Fields(version) + if len(fields) > 0 { + return fields[0] + } + } + } + return "" +} - return []pkg.Package{p}, nil, nil +func findName(versionResources map[string]string) string { + for _, key := range []string{"FileDescription", "ProductName"} { + if name, ok := versionResources[key]; ok { + if strings.TrimSpace(name) == "" { + continue + } + trimmed := 
strings.TrimSpace(name) + return regexp.MustCompile(`[^a-zA-Z0-9.]+`).ReplaceAllString(trimmed, "") + } + } + return "" } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/cataloger.go index f4e62eff..9ddf5c2b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/cataloger.go @@ -1,5 +1,5 @@ /* -Package elixir provides a concrete Cataloger implementation for elixir specific package manger files. +Package elixir provides a concrete Cataloger implementation relating to packages within the Elixir language ecosystem. */ package elixir @@ -7,10 +7,8 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "elixir-mix-lock-cataloger" - -// NewMixLockCataloger returns parses mix.lock files and returns packages +// NewMixLockCataloger returns a cataloger object for Elixir mix.lock files. func NewMixLockCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("elixir-mix-lock-cataloger"). WithParserByGlobs(parseMixLock, "**/mix.lock") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/package.go index 85dcd1f4..50cd3f28 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/package.go @@ -6,16 +6,15 @@ import ( "github.com/anchore/syft/syft/pkg" ) -func newPackage(d pkg.MixLockMetadata, locations ...file.Location) pkg.Package { +func newPackage(d pkg.ElixirMixLockEntry, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: d.Name, - Version: d.Version, - Language: pkg.Elixir, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(d), - Type: pkg.HexPkg, - MetadataType: pkg.MixLockMetadataType, - Metadata: d, + Name: d.Name, + Version: d.Version, + Language: pkg.Elixir, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(d), + Type: pkg.HexPkg, + Metadata: d, } p.SetID() @@ -23,7 +22,7 @@ func newPackage(d pkg.MixLockMetadata, locations ...file.Location) pkg.Package { return p } -func packageURL(m pkg.MixLockMetadata) string { +func packageURL(m pkg.ElixirMixLockEntry) string { var qualifiers packageurl.Qualifiers return packageurl.NewPackageURL( diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/parse_mix_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/parse_mix_lock.go index 46b4f4aa..446b620b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/parse_mix_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/elixir/parse_mix_lock.go @@ -45,7 +45,7 @@ func parseMixLock(_ file.Resolver, _ *generic.Environment, reader file.LocationR packages = append(packages, newPackage( - pkg.MixLockMetadata{ + pkg.ElixirMixLockEntry{ Name: name, Version: version, PkgHash: hash, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/cataloger.go index 3e3b54f8..8eba5a7c 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/cataloger.go @@ -1,5 +1,5 @@ /* -Package erlang provides a concrete Cataloger implementation for erlang specific package manger files. 
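The dotnet PE parser above now funnels name and version discovery through the findName and findVersion helpers, which fall back from FileDescription to ProductName and keep only the first whitespace-separated field of FileVersion. A test-style sketch of that behavior, with invented version resource values:

```go
package dotnet

import "testing"

// Sketch of the findName/findVersion behavior added above; the sample
// version resources are invented for illustration.
func TestFindNameAndVersion_sketch(t *testing.T) {
	vr := map[string]string{
		"FileDescription": "Microsoft .NET Runtime",
		"FileVersion":     "6.0.21 @Commit: 4822e3c2aa2a6e3cb573f8fdee2a0b896ea0661c",
	}
	if got := findName(vr); got != "Microsoft.NETRuntime" {
		t.Fatalf("unexpected name: %q", got)
	}
	if got := findVersion(vr); got != "6.0.21" {
		t.Fatalf("unexpected version: %q", got)
	}
}
```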
+Package erlang provides a concrete Cataloger implementation relating to packages within the Erlang language ecosystem. */ package erlang @@ -7,10 +7,8 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "erlang-rebar-lock-cataloger" - -// NewRebarLockCataloger returns parses rebar.lock files and returns packages. +// NewRebarLockCataloger returns a new cataloger instance for Erlang rebar.lock files. func NewRebarLockCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("erlang-rebar-lock-cataloger"). WithParserByGlobs(parseRebarLock, "**/rebar.lock") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/package.go index 5fa28e59..c9009876 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/package.go @@ -6,16 +6,15 @@ import ( "github.com/anchore/syft/syft/pkg" ) -func newPackage(d pkg.RebarLockMetadata, locations ...file.Location) pkg.Package { +func newPackage(d pkg.ErlangRebarLockEntry, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: d.Name, - Version: d.Version, - Language: pkg.Erlang, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(d), - Type: pkg.HexPkg, - MetadataType: pkg.RebarLockMetadataType, - Metadata: d, + Name: d.Name, + Version: d.Version, + Language: pkg.Erlang, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(d), + Type: pkg.HexPkg, + Metadata: d, } p.SetID() @@ -23,7 +22,7 @@ func newPackage(d pkg.RebarLockMetadata, locations ...file.Location) pkg.Package return p } -func packageURL(m pkg.RebarLockMetadata) string { +func packageURL(m pkg.ErlangRebarLockEntry) string { var qualifiers packageurl.Qualifiers return packageurl.NewPackageURL( diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/parse_rebar_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/parse_rebar_lock.go index a2066f2c..b4000f4b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/parse_rebar_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/erlang/parse_rebar_lock.go @@ -49,7 +49,7 @@ func parseRebarLock(_ file.Resolver, _ *generic.Environment, reader file.Locatio } p := newPackage( - pkg.RebarLockMetadata{ + pkg.ErlangRebarLockEntry{ Name: name, Version: version, }, @@ -72,7 +72,7 @@ func parseRebarLock(_ file.Resolver, _ *generic.Environment, reader file.Locatio log.WithFields("package", name).Warn("unable find source package") continue } - metadata, ok := sourcePkg.Metadata.(pkg.RebarLockMetadata) + metadata, ok := sourcePkg.Metadata.(pkg.ErlangRebarLockEntry) if !ok { log.WithFields("package", name).Warn("unable to extract rebar.lock metadata to add hash metadata") continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/generic/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/generic/cataloger.go index b898133f..41faaf28 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/generic/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/generic/cataloger.go @@ -127,7 +127,7 @@ func (c *Cataloger) Catalog(resolver file.Resolver) ([]pkg.Package, []artifact.R } discoveredPackages, discoveredRelationships, err := parser(resolver, &env, file.NewLocationReadCloser(location, contentReader)) - internal.CloseAndLogError(contentReader, location.VirtualPath) + 
internal.CloseAndLogError(contentReader, location.AccessPath) if err != nil { logger.WithFields("location", location.RealPath, "error", err).Warnf("cataloger failed") continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/cataloger.go new file mode 100644 index 00000000..67c9a218 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/cataloger.go @@ -0,0 +1,14 @@ +/* +Package gentoo provides a concrete Cataloger implementation related to packages within the Gentoo linux ecosystem. +*/ +package gentoo + +import ( + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +// NewPortageCataloger returns a new cataloger object initialized for Gentoo Portage package manager files (a flat-file store). +func NewPortageCataloger() *generic.Cataloger { + return generic.NewCataloger("portage-cataloger"). + WithParserByGlobs(parsePortageContents, "**/var/db/pkg/*/*/CONTENTS") +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/parse_portage_contents.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/parse_portage_contents.go similarity index 94% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/parse_portage_contents.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/parse_portage_contents.go index 35c1ecb5..5da6e8c3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/parse_portage_contents.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/parse_portage_contents.go @@ -1,4 +1,4 @@ -package portage +package gentoo import ( "bufio" @@ -23,6 +23,7 @@ var ( _ generic.Parser = parsePortageContents ) +// parses individual CONTENTS files from the portage flat-file store (e.g. /var/db/pkg/*/*/CONTENTS). 
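The portage parser continued in the next hunks attaches pkg.PortageEntry metadata, including the file records read from each CONTENTS file. A small consumer-side sketch under that assumption:

```go
package example

import (
	"fmt"

	"github.com/anchore/syft/syft/pkg"
)

// countPortageFiles reports how many files the CONTENTS listing recorded for
// a package cataloged by the portage-cataloger.
func countPortageFiles(p pkg.Package) {
	if entry, ok := p.Metadata.(pkg.PortageEntry); ok {
		fmt.Printf("%s installs %d files\n", p.Name, len(entry.Files))
	}
}
```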
func parsePortageContents(resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { cpvMatch := cpvRe.FindStringSubmatch(reader.Location.RealPath) if cpvMatch == nil { @@ -42,9 +43,8 @@ func parsePortageContents(resolver file.Resolver, _ *generic.Environment, reader Locations: file.NewLocationSet( reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), - Type: pkg.PortagePkg, - MetadataType: pkg.PortageMetadataType, - Metadata: pkg.PortageMetadata{ + Type: pkg.PortagePkg, + Metadata: pkg.PortageEntry{ // ensure the default value for a collection is never nil since this may be shown as JSON Files: make([]pkg.PortageFileRecord, 0), }, @@ -65,7 +65,7 @@ func addFiles(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) return } - entry, ok := p.Metadata.(pkg.PortageMetadata) + entry, ok := p.Metadata.(pkg.PortageEntry) if !ok { return } @@ -130,7 +130,7 @@ func addSize(resolver file.Resolver, dbLocation file.Location, p *pkg.Package) { return } - entry, ok := p.Metadata.(pkg.PortageMetadata) + entry, ok := p.Metadata.(pkg.PortageEntry) if !ok { return } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/purl.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/purl.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/purl.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/purl.go index 9ff016b9..dc6fd626 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/purl.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/gentoo/purl.go @@ -1,4 +1,4 @@ -package portage +package gentoo import ( "github.com/anchore/packageurl-go" diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/githubactions/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/githubactions/cataloger.go index 825d3942..e4844108 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/githubactions/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/githubactions/cataloger.go @@ -1,3 +1,6 @@ +/* +Package githubactions provides a concrete Cataloger implementation for GitHub Actions packages (both actions and workflows). +*/ package githubactions import "github.com/anchore/syft/syft/pkg/cataloger/generic" diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/cataloger.go index d1d11715..d5616457 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/cataloger.go @@ -1,5 +1,5 @@ /* -Package golang provides a concrete Cataloger implementation for go.mod files. +Package golang provides a concrete Cataloger implementation relating to packages within the Go language ecosystem. */ package golang @@ -19,19 +19,19 @@ import ( var versionCandidateGroups = regexp.MustCompile(`(?P\d+(\.\d+)?(\.\d+)?)(?P\w*)`) -// NewGoModFileCataloger returns a new Go module cataloger object. -func NewGoModFileCataloger(opts GoCatalogerOpts) pkg.Cataloger { +// NewGoModuleFileCataloger returns a new cataloger object that searches within go.mod files. +func NewGoModuleFileCataloger(opts GoCatalogerOpts) pkg.Cataloger { c := goModCataloger{ licenses: newGoLicenses(opts), } return &progressingCataloger{ progress: c.licenses.progress, - cataloger: generic.NewCataloger("go-mod-file-cataloger"). 
+ cataloger: generic.NewCataloger("go-module-file-cataloger"). WithParserByGlobs(c.parseGoModFile, "**/go.mod"), } } -// NewGoModuleBinaryCataloger returns a new Golang cataloger object. +// NewGoModuleBinaryCataloger returns a new cataloger object that searches within binaries built by the go compiler. func NewGoModuleBinaryCataloger(opts GoCatalogerOpts) pkg.Cataloger { c := goBinaryCataloger{ licenses: newGoLicenses(opts), @@ -58,7 +58,7 @@ func (p *progressingCataloger) Catalog(resolver file.Resolver) ([]pkg.Package, [ goCompilerPkgs := []pkg.Package{} totalLocations := file.NewLocationSet() for _, goPkg := range pkgs { - mValue, ok := goPkg.Metadata.(pkg.GolangBinMetadata) + mValue, ok := goPkg.Metadata.(pkg.GolangBinaryBuildinfoEntry) if !ok { continue } @@ -82,15 +82,15 @@ func newGoStdLib(version string, location file.LocationSet) *pkg.Package { return nil } goCompilerPkg := &pkg.Package{ - Name: "stdlib", - Version: version, - PURL: packageURL("stdlib", strings.TrimPrefix(version, "go")), - CPEs: []cpe.CPE{stdlibCpe}, - Locations: location, - Language: pkg.Go, - Type: pkg.GoModulePkg, - MetadataType: pkg.GolangBinMetadataType, - Metadata: pkg.GolangBinMetadata{ + Name: "stdlib", + Version: version, + PURL: packageURL("stdlib", strings.TrimPrefix(version, "go")), + CPEs: []cpe.CPE{stdlibCpe}, + Locations: location, + Licenses: pkg.NewLicenseSet(pkg.NewLicense("BSD-3-Clause")), + Language: pkg.Go, + Type: pkg.GoModulePkg, + Metadata: pkg.GolangBinaryBuildinfoEntry{ GoCompiledVersion: version, }, } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/licenses.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/licenses.go index 7fef0dbc..d805649c 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/licenses.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/licenses.go @@ -32,7 +32,7 @@ type goLicenses struct { opts GoCatalogerOpts localModCacheResolver file.WritableResolver progress *monitor.CatalogerTask - licenseFileNames *strset.Set + lowerLicenseFileNames *strset.Set } func newGoLicenses(opts GoCatalogerOpts) goLicenses { @@ -44,10 +44,18 @@ func newGoLicenses(opts GoCatalogerOpts) goLicenses { RemoveOnCompletion: true, Title: "Downloading go mod", }, - licenseFileNames: strset.New(licenses.FileNames()...), + lowerLicenseFileNames: strset.New(lowercaseLicenseFiles()...), } } +func lowercaseLicenseFiles() []string { + fileNames := licenses.FileNames() + for i := range fileNames { + fileNames[i] = strings.ToLower(fileNames[i]) + } + return fileNames +} + func remotesForModule(proxies []string, noProxy []string, module string) []string { for _, pattern := range noProxy { if matched, err := path.Match(pattern, module); err == nil && matched { @@ -158,7 +166,7 @@ func (c *goLicenses) findLicenses(resolver file.Resolver, globMatch string) (out for _, l := range locations { fileName := path.Base(l.RealPath) - if c.licenseFileNames.Has(fileName) { + if c.lowerLicenseFileNames.Has(strings.ToLower(fileName)) { contents, err := resolver.FileContentsByLocation(l) if err != nil { return nil, err diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/package.go index 37942563..d159fe4f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/package.go @@ -22,15 +22,14 @@ func (c *goBinaryCataloger) newGoBinaryPackage(resolver file.Resolver, dep *debu } p := 
pkg.Package{ - Name: dep.Path, - Version: dep.Version, - Licenses: pkg.NewLicenseSet(licenses...), - PURL: packageURL(dep.Path, dep.Version), - Language: pkg.Go, - Type: pkg.GoModulePkg, - Locations: file.NewLocationSet(locations...), - MetadataType: pkg.GolangBinMetadataType, - Metadata: pkg.GolangBinMetadata{ + Name: dep.Path, + Version: dep.Version, + Licenses: pkg.NewLicenseSet(licenses...), + PURL: packageURL(dep.Path, dep.Version), + Language: pkg.Go, + Type: pkg.GoModulePkg, + Locations: file.NewLocationSet(locations...), + Metadata: pkg.GolangBinaryBuildinfoEntry{ GoCompiledVersion: goVersion, H1Digest: dep.Sum, Architecture: architecture, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_binary.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_binary.go index 5ccaaae3..567c28cd 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_binary.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_binary.go @@ -47,7 +47,7 @@ type goBinaryCataloger struct { licenses goLicenses } -// Catalog is given an object to resolve file references and content, this function returns any discovered Packages after analyzing rpm db installation. +// parseGoBinary catalogs packages found in the "buildinfo" section of a binary built by the go compiler. func (c *goBinaryCataloger) parseGoBinary(resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { var pkgs []pkg.Package @@ -87,7 +87,7 @@ func (c *goBinaryCataloger) makeGoMainPackage(resolver file.Resolver, mod *exten timestamp, hasTimestamp := gbs["vcs.time"] var ldflags string - if metadata, ok := main.Metadata.(pkg.GolangBinMetadata); ok { + if metadata, ok := main.Metadata.(pkg.GolangBinaryBuildinfoEntry); ok { // we've found a specific version from the ldflags! use it as the version. // why not combine that with the pseudo version (e.g. v1.2.3-0.20210101000000-abcdef123456)? 
// short answer: we're assuming that if a specific semver was provided in the ldflags that diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_mod.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_mod.go index 7ef4ac0a..cf780d11 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_mod.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/golang/parse_go_mod.go @@ -48,15 +48,14 @@ func (c *goModCataloger) parseGoModFile(resolver file.Resolver, _ *generic.Envir } packages[m.Mod.Path] = pkg.Package{ - Name: m.Mod.Path, - Version: m.Mod.Version, - Licenses: pkg.NewLicenseSet(licenses...), - Locations: file.NewLocationSet(reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - PURL: packageURL(m.Mod.Path, m.Mod.Version), - Language: pkg.Go, - Type: pkg.GoModulePkg, - MetadataType: pkg.GolangModMetadataType, - Metadata: pkg.GolangModMetadata{ + Name: m.Mod.Path, + Version: m.Mod.Version, + Licenses: pkg.NewLicenseSet(licenses...), + Locations: file.NewLocationSet(reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + PURL: packageURL(m.Mod.Path, m.Mod.Version), + Language: pkg.Go, + Type: pkg.GoModulePkg, + Metadata: pkg.GolangModuleEntry{ H1Digest: digests[fmt.Sprintf("%s %s", m.Mod.Path, m.Mod.Version)], }, } @@ -70,15 +69,14 @@ func (c *goModCataloger) parseGoModFile(resolver file.Resolver, _ *generic.Envir } packages[m.New.Path] = pkg.Package{ - Name: m.New.Path, - Version: m.New.Version, - Licenses: pkg.NewLicenseSet(licenses...), - Locations: file.NewLocationSet(reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - PURL: packageURL(m.New.Path, m.New.Version), - Language: pkg.Go, - Type: pkg.GoModulePkg, - MetadataType: pkg.GolangModMetadataType, - Metadata: pkg.GolangModMetadata{ + Name: m.New.Path, + Version: m.New.Version, + Licenses: pkg.NewLicenseSet(licenses...), + Locations: file.NewLocationSet(reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + PURL: packageURL(m.New.Path, m.New.Version), + Language: pkg.Go, + Type: pkg.GoModulePkg, + Metadata: pkg.GolangModuleEntry{ H1Digest: digests[fmt.Sprintf("%s %s", m.New.Path, m.New.Version)], }, } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/cataloger.go index a7638ee8..40b4bbc4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/cataloger.go @@ -1,3 +1,6 @@ +/* +Package haskell provides a concrete Cataloger implementation relating to packages within the Haskell language ecosystem. +*/ package haskell import ( @@ -6,7 +9,7 @@ import ( // TODO: it seems that the stack.yaml/stack.lock/cabal.project.freeze have different purposes and could have different installation intentions // (some describe intent and are meant to be used by a tool to resolve more dependencies while others describe the actual installed state). -// This hints at splittin these into multiple catalogers, but for now we'll keep them together. +// This hints at splitting these into multiple catalogers, but for now we'll keep them together. // NewHackageCataloger returns a new Haskell cataloger object. 
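The Go catalogers above now publish pkg.GolangBinaryBuildinfoEntry (for binaries) and pkg.GolangModuleEntry (for go.mod files) in place of the old metadata types. A minimal sketch of reading the buildinfo fields shown in those hunks:

```go
package example

import (
	"fmt"

	"github.com/anchore/syft/syft/pkg"
)

// printGoBuildinfo prints the compiler version and module checksum recorded
// for a package discovered inside a Go binary.
func printGoBuildinfo(p pkg.Package) {
	entry, ok := p.Metadata.(pkg.GolangBinaryBuildinfoEntry)
	if !ok {
		return // not discovered from a Go binary's buildinfo section
	}
	fmt.Printf("%s: compiled with %s, h1 digest %s\n", p.Name, entry.GoCompiledVersion, entry.H1Digest)
}
```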
func NewHackageCataloger() *generic.Cataloger { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/package.go index ed47921b..6594ce21 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/package.go @@ -6,19 +6,15 @@ import ( "github.com/anchore/syft/syft/pkg" ) -func newPackage(name, version string, m *pkg.HackageMetadata, locations ...file.Location) pkg.Package { +func newPackage(name, version string, m any, location file.Location) pkg.Package { p := pkg.Package{ Name: name, Version: version, - Locations: file.NewLocationSet(locations...), + Locations: file.NewLocationSet(location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), PURL: packageURL(name, version), Language: pkg.Haskell, Type: pkg.HackagePkg, - } - - if m != nil { - p.MetadataType = pkg.HackageMetadataType - p.Metadata = *m + Metadata: m, } p.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_cabal_freeze.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_cabal_freeze.go index abb2c82c..05dc1dcc 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_cabal_freeze.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_cabal_freeze.go @@ -52,7 +52,7 @@ func parseCabalFreeze(_ file.Resolver, _ *generic.Environment, reader file.Locat pkgName, pkgVersion, nil, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + reader.Location, ), ) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_lock.go index 3eabd797..40dc6aa6 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_lock.go @@ -7,6 +7,7 @@ import ( "gopkg.in/yaml.v3" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" "github.com/anchore/syft/syft/pkg" @@ -47,7 +48,8 @@ func parseStackLock(_ file.Resolver, _ *generic.Environment, reader file.Locatio var lockFile stackLock if err := yaml.Unmarshal(bytes, &lockFile); err != nil { - return nil, nil, fmt.Errorf("failed to parse stack.yaml.lock file: %w", err) + log.WithFields("error", err).Tracef("failed to parse stack.yaml.lock file %q", reader.RealPath) + return nil, nil, nil } var ( @@ -67,11 +69,11 @@ func parseStackLock(_ file.Resolver, _ *generic.Environment, reader file.Locatio newPackage( pkgName, pkgVersion, - &pkg.HackageMetadata{ + pkg.HackageStackYamlLockEntry{ PkgHash: pkgHash, SnapshotURL: snapshotURL, }, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + reader.Location, ), ) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_yaml.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_yaml.go index c31bc6a5..eb174c7d 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_yaml.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/haskell/parse_stack_yaml.go @@ -6,6 +6,7 @@ import ( "gopkg.in/yaml.v3" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" "github.com/anchore/syft/syft/pkg" @@ -28,7 +29,8 @@ 
func parseStackYaml(_ file.Resolver, _ *generic.Environment, reader file.Locatio var stackFile stackYaml if err := yaml.Unmarshal(bytes, &stackFile); err != nil { - return nil, nil, fmt.Errorf("failed to parse stack.yaml file: %w", err) + log.WithFields("error", err).Tracef("failed to parse stack.yaml file %q", reader.RealPath) + return nil, nil, nil } var pkgs []pkg.Package @@ -39,10 +41,10 @@ func parseStackYaml(_ file.Resolver, _ *generic.Environment, reader file.Locatio newPackage( pkgName, pkgVersion, - &pkg.HackageMetadata{ + pkg.HackageStackYamlEntry{ PkgHash: pkgHash, }, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + reader.Location, ), ) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/archive_parser.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/archive_parser.go index 5f0d94e3..5b169614 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/archive_parser.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/archive_parser.go @@ -3,9 +3,15 @@ package java import ( "crypto" "fmt" + "io" + "net/http" + "net/url" "os" "path" "strings" + "time" + + "github.com/vifraa/gopom" intFile "github.com/anchore/syft/internal/file" "github.com/anchore/syft/internal/licenses" @@ -16,8 +22,6 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -var _ generic.Parser = parseJavaArchive - var archiveFormatGlobs = []string{ "**/*.jar", "**/*.war", @@ -49,11 +53,20 @@ type archiveParser struct { contentPath string fileInfo archiveFilename detectNested bool + cfg Config +} + +type genericArchiveParserAdapter struct { + cfg Config +} + +func newGenericArchiveParserAdapter(cfg Config) genericArchiveParserAdapter { + return genericArchiveParserAdapter{cfg: cfg} } // parseJavaArchive is a parser function for java archive contents, returning all Java libraries and nested archives. -func parseJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { - parser, cleanupFn, err := newJavaArchiveParser(reader, true) +func (gap genericArchiveParserAdapter) parseJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + parser, cleanupFn, err := newJavaArchiveParser(reader, true, gap.cfg) // note: even on error, we should always run cleanup functions defer cleanupFn() if err != nil { @@ -72,9 +85,9 @@ func uniquePkgKey(groupID string, p *pkg.Package) string { // newJavaArchiveParser returns a new java archive parser object for the given archive. Can be configured to discover // and parse nested archives or ignore them. 
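For context on the Java archive changes that follow: parseJavaArchive becomes a method on genericArchiveParserAdapter so cataloger configuration can reach the parser, the Config fields it carries (UseNetwork, MavenBaseURL, MaxParentRecursiveDepth) are added in config.go and options.go further down in this diff, and NewArchiveCataloger (renamed below) is the entry point that threads them through. A hedged construction sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/anchore/syft/syft/pkg/cataloger/java"
)

func main() {
	// field names as added in config.go/options.go later in this diff;
	// the values chosen here are illustrative, not authoritative defaults
	cfg := java.Config{
		SearchIndexedArchives:   true,
		SearchUnindexedArchives: false,
		UseNetwork:              false,              // opt-in: fetch parent poms from Maven Central
		MavenBaseURL:            java.MavenBaseURL,  // "https://repo1.maven.org/maven2"
		MaxParentRecursiveDepth: 5,
	}
	c := java.NewArchiveCataloger(cfg)
	fmt.Println(c.Name()) // "java-archive-cataloger"
}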
-func newJavaArchiveParser(reader file.LocationReadCloser, detectNested bool) (*archiveParser, func(), error) { +func newJavaArchiveParser(reader file.LocationReadCloser, detectNested bool, cfg Config) (*archiveParser, func(), error) { // fetch the last element of the virtual path - virtualElements := strings.Split(reader.AccessPath(), ":") + virtualElements := strings.Split(reader.Path(), ":") currentFilepath := virtualElements[len(virtualElements)-1] contentPath, archivePath, cleanupFn, err := saveArchiveToTmp(currentFilepath, reader) @@ -94,6 +107,7 @@ func newJavaArchiveParser(reader file.LocationReadCloser, detectNested bool) (*a contentPath: contentPath, fileInfo: newJavaArchiveFilename(currentFilepath), detectNested: detectNested, + cfg: cfg, }, cleanupFn, nil } @@ -136,7 +150,7 @@ func (j *archiveParser) parse() ([]pkg.Package, []artifact.Relationship, error) // jar, we wait until the conclusion of the parsing process before synthesizing pURLs. for i := range pkgs { p := &pkgs[i] - if m, ok := p.Metadata.(pkg.JavaMetadata); ok { + if m, ok := p.Metadata.(pkg.JavaArchive); ok { p.PURL = packageURL(p.Name, p.Version, m) } else { log.WithFields("package", p.String()).Warn("unable to extract java metadata to generate purl") @@ -150,7 +164,7 @@ func (j *archiveParser) parse() ([]pkg.Package, []artifact.Relationship, error) // discoverMainPackage parses the root Java manifest used as the parent package to all discovered nested packages. func (j *archiveParser) discoverMainPackage() (*pkg.Package, error) { // search and parse java manifest files - manifestMatches := j.fileManifest.GlobMatch(manifestGlob) + manifestMatches := j.fileManifest.GlobMatch(false, manifestGlob) if len(manifestMatches) > 1 { return nil, fmt.Errorf("found multiple manifests in the jar: %+v", manifestMatches) } else if len(manifestMatches) == 0 { @@ -192,10 +206,9 @@ func (j *archiveParser) discoverMainPackage() (*pkg.Package, error) { Locations: file.NewLocationSet( j.location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), - Type: j.fileInfo.pkgType(), - MetadataType: pkg.JavaMetadataType, - Metadata: pkg.JavaMetadata{ - VirtualPath: j.location.AccessPath(), + Type: j.fileInfo.pkgType(), + Metadata: pkg.JavaArchive{ + VirtualPath: j.location.Path(), Manifest: manifest, ArchiveDigests: digests, }, @@ -237,43 +250,85 @@ func (j *archiveParser) parseLicenses(manifest *pkg.JavaManifest) ([]pkg.License } } + // If we didn't find any licenses in the archive so far, we'll try again in Maven Central using groupIDFromJavaMetadata + if len(licenses) == 0 && j.cfg.UseNetwork { + licenses = findLicenseFromJavaMetadata(name, manifest, version, j, licenses) + } + return licenses, name, version, nil } +func findLicenseFromJavaMetadata(name string, manifest *pkg.JavaManifest, version string, j *archiveParser, licenses []pkg.License) []pkg.License { + var groupID = name + if gID := groupIDFromJavaMetadata(name, pkg.JavaArchive{Manifest: manifest}); gID != "" { + groupID = gID + } + pomLicenses, err := recursivelyFindLicensesFromParentPom(groupID, name, version, j.cfg) + if err != nil { + log.Tracef("unable to get parent pom from Maven central: %v", err) + } + + if len(pomLicenses) == 0 { + // Try removing the last part of the groupId, as sometimes it duplicates the artifactId + packages := strings.Split(groupID, ".") + groupID = strings.Join(packages[:len(packages)-1], ".") + pomLicenses, err = recursivelyFindLicensesFromParentPom(groupID, name, version, j.cfg) + if err != nil { + log.Tracef("unable to 
get parent pom from Maven central: %v", err) + } + } + + if len(pomLicenses) > 0 { + pkgLicenses := pkg.NewLicensesFromLocation(j.location, pomLicenses...) + if pkgLicenses != nil { + licenses = append(licenses, pkgLicenses...) + } + } + return licenses +} + type parsedPomProject struct { - *pkg.PomProject + *pkg.JavaPomProject Licenses []pkg.License } func (j *archiveParser) guessMainPackageNameAndVersionFromPomInfo() (name, version string, licenses []pkg.License) { - pomPropertyMatches := j.fileManifest.GlobMatch(pomPropertiesGlob) - pomMatches := j.fileManifest.GlobMatch(pomXMLGlob) - var pomPropertiesObject pkg.PomProperties - var pomProjectObject parsedPomProject - if len(pomPropertyMatches) == 1 || len(pomMatches) == 1 { - // we have exactly 1 pom.properties or pom.xml in the archive; assume it represents the - // package we're scanning if the names seem like a plausible match - properties, _ := pomPropertiesByParentPath(j.archivePath, j.location, pomPropertyMatches) - projects, _ := pomProjectByParentPath(j.archivePath, j.location, pomMatches) - - for parentPath, propertiesObj := range properties { - if artifactIDMatchesFilename(propertiesObj.ArtifactID, j.fileInfo.name) { - pomPropertiesObject = propertiesObj - if proj, exists := projects[parentPath]; exists { - pomProjectObject = proj - } + pomPropertyMatches := j.fileManifest.GlobMatch(false, pomPropertiesGlob) + pomMatches := j.fileManifest.GlobMatch(false, pomXMLGlob) + var pomPropertiesObject pkg.JavaPomProperties + var pomProjectObject *parsedPomProject + + // Find the pom.properties/pom.xml if the names seem like a plausible match + properties, _ := pomPropertiesByParentPath(j.archivePath, j.location, pomPropertyMatches) + projects, _ := pomProjectByParentPath(j.archivePath, j.location, pomMatches) + + for parentPath, propertiesObj := range properties { + if artifactIDMatchesFilename(propertiesObj.ArtifactID, j.fileInfo.name) { + pomPropertiesObject = propertiesObj + if proj, exists := projects[parentPath]; exists { + pomProjectObject = proj + break } } } + name = pomPropertiesObject.ArtifactID - if name == "" && pomProjectObject.PomProject != nil { + if name == "" && pomProjectObject != nil { name = pomProjectObject.ArtifactID } version = pomPropertiesObject.Version - if version == "" && pomProjectObject.PomProject != nil { + if version == "" && pomProjectObject != nil { version = pomProjectObject.Version } - return name, version, pomProjectObject.Licenses + if pomProjectObject != nil && j.cfg.UseNetwork { + findPomLicenses(pomProjectObject, j.cfg) + } + + if pomProjectObject != nil { + licenses = pomProjectObject.Licenses + } + + return name, version, licenses } func artifactIDMatchesFilename(artifactID, fileName string) bool { @@ -283,6 +338,117 @@ func artifactIDMatchesFilename(artifactID, fileName string) bool { return strings.HasPrefix(artifactID, fileName) || strings.HasSuffix(fileName, artifactID) } +func findPomLicenses(pomProjectObject *parsedPomProject, cfg Config) { + // If we don't have any licenses until now, and if we have a parent Pom, then we'll check the parent pom in maven central for licenses. 
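The comment above describes the Maven Central fallback; formatMavenPomURL, defined a little further down in this hunk, turns the group ID's dots into path segments before joining them onto MavenBaseURL. A sketch of the resulting request URL for hypothetical coordinates:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// hypothetical parent-pom coordinates, purely illustrative
	groupID, artifactID, version := "org.apache.logging.log4j", "log4j-parent", "2.20.0"

	elems := strings.Split(groupID, ".") // "org.apache..." -> org/apache/...
	elems = append(elems, artifactID, version, fmt.Sprintf("%s-%s.pom", artifactID, version))

	u, err := url.JoinPath("https://repo1.maven.org/maven2", elems...)
	if err != nil {
		panic(err)
	}
	fmt.Println(u)
	// https://repo1.maven.org/maven2/org/apache/logging/log4j/log4j-parent/2.20.0/log4j-parent-2.20.0.pom
}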
+ if pomProjectObject != nil && pomProjectObject.Parent != nil && len(pomProjectObject.Licenses) == 0 { + parentLicenses, err := recursivelyFindLicensesFromParentPom( + pomProjectObject.Parent.GroupID, + pomProjectObject.Parent.ArtifactID, + pomProjectObject.Parent.Version, + cfg) + if err != nil { + // We don't want to abort here as the parent pom might not exist in Maven Central, we'll just log the error + log.Tracef("unable to get parent pom from Maven central: %v", err) + return + } + if len(parentLicenses) > 0 { + for _, licenseName := range parentLicenses { + pomProjectObject.Licenses = append(pomProjectObject.Licenses, pkg.NewLicenseFromFields(licenseName, "", nil)) + } + } + } +} + +func formatMavenPomURL(groupID, artifactID, version, mavenBaseURL string) (requestURL string, err error) { + // groupID needs to go from maven.org -> maven/org + urlPath := strings.Split(groupID, ".") + artifactPom := fmt.Sprintf("%s-%s.pom", artifactID, version) + urlPath = append(urlPath, artifactID, version, artifactPom) + + // ex:"https://repo1.maven.org/maven2/groupID/artifactID/artifactPom + requestURL, err = url.JoinPath(mavenBaseURL, urlPath...) + if err != nil { + return requestURL, fmt.Errorf("could not construct maven url: %w", err) + } + return requestURL, err +} + +func recursivelyFindLicensesFromParentPom(groupID, artifactID, version string, cfg Config) ([]string, error) { + var licenses []string + // As there can be nested parent poms, we'll recursively check for licenses until we reach the max depth + for i := 0; i < cfg.MaxParentRecursiveDepth; i++ { + parentPom, err := getPomFromMavenCentral(groupID, artifactID, version, cfg.MavenBaseURL) + if err != nil { + return nil, err + } + parentLicenses := parseLicensesFromPom(parentPom) + if len(parentLicenses) > 0 || parentPom == nil || parentPom.Parent == nil { + licenses = parentLicenses + break + } + + groupID = *parentPom.Parent.GroupID + artifactID = *parentPom.Parent.ArtifactID + version = *parentPom.Parent.Version + } + + return licenses, nil +} + +func getPomFromMavenCentral(groupID, artifactID, version, mavenBaseURL string) (*gopom.Project, error) { + requestURL, err := formatMavenPomURL(groupID, artifactID, version, mavenBaseURL) + if err != nil { + return nil, err + } + log.Tracef("trying to fetch parent pom from Maven central %s", requestURL) + + mavenRequest, err := http.NewRequest(http.MethodGet, requestURL, nil) + if err != nil { + return nil, fmt.Errorf("unable to format request for Maven central: %w", err) + } + + httpClient := &http.Client{ + Timeout: time.Second * 10, + } + + resp, err := httpClient.Do(mavenRequest) + if err != nil { + return nil, fmt.Errorf("unable to get pom from Maven central: %w", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + log.Errorf("unable to close body: %+v", err) + } + }() + + bytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to parse pom from Maven central: %w", err) + } + + pom, err := decodePomXML(strings.NewReader(string(bytes))) + if err != nil { + return nil, fmt.Errorf("unable to parse pom from Maven central: %w", err) + } + + return &pom, nil +} + +func parseLicensesFromPom(pom *gopom.Project) []string { + var licenses []string + if pom != nil && pom.Licenses != nil { + for _, license := range *pom.Licenses { + if license.Name != nil { + licenses = append(licenses, *license.Name) + } else if license.URL != nil { + licenses = append(licenses, *license.URL) + } + } + } + + return licenses +} + // discoverPkgsFromAllMavenFiles 
parses Maven POM properties/xml for a given // parent package, returning all listed Java packages found for each pom // properties discovered and potentially updating the given parentPkg with new @@ -295,13 +461,13 @@ func (j *archiveParser) discoverPkgsFromAllMavenFiles(parentPkg *pkg.Package) ([ var pkgs []pkg.Package // pom.properties - properties, err := pomPropertiesByParentPath(j.archivePath, j.location, j.fileManifest.GlobMatch(pomPropertiesGlob)) + properties, err := pomPropertiesByParentPath(j.archivePath, j.location, j.fileManifest.GlobMatch(false, pomPropertiesGlob)) if err != nil { return nil, err } // pom.xml - projects, err := pomProjectByParentPath(j.archivePath, j.location, j.fileManifest.GlobMatch(pomXMLGlob)) + projects, err := pomProjectByParentPath(j.archivePath, j.location, j.fileManifest.GlobMatch(false, pomXMLGlob)) if err != nil { return nil, err } @@ -309,10 +475,10 @@ func (j *archiveParser) discoverPkgsFromAllMavenFiles(parentPkg *pkg.Package) ([ for parentPath, propertiesObj := range properties { var pomProject *parsedPomProject if proj, exists := projects[parentPath]; exists { - pomProject = &proj + pomProject = proj } - pkgFromPom := newPackageFromMavenData(propertiesObj, pomProject, parentPkg, j.location) + pkgFromPom := newPackageFromMavenData(propertiesObj, pomProject, parentPkg, j.location, j.cfg) if pkgFromPom != nil { pkgs = append(pkgs, *pkgFromPom) } @@ -340,10 +506,10 @@ func getDigestsFromArchive(archivePath string) ([]file.Digest, error) { func (j *archiveParser) getLicenseFromFileInArchive() ([]pkg.License, error) { var fileLicenses []pkg.License for _, filename := range licenses.FileNames() { - licenseMatches := j.fileManifest.GlobMatch("/META-INF/" + filename) + licenseMatches := j.fileManifest.GlobMatch(true, "/META-INF/"+filename) if len(licenseMatches) == 0 { // Try the root directory if it's not in META-INF - licenseMatches = j.fileManifest.GlobMatch("/" + filename) + licenseMatches = j.fileManifest.GlobMatch(true, "/"+filename) } if len(licenseMatches) > 0 { @@ -371,36 +537,36 @@ func (j *archiveParser) getLicenseFromFileInArchive() ([]pkg.License, error) { func (j *archiveParser) discoverPkgsFromNestedArchives(parentPkg *pkg.Package) ([]pkg.Package, []artifact.Relationship, error) { // we know that all java archives are zip formatted files, so we can use the shared zip helper - return discoverPkgsFromZip(j.location, j.archivePath, j.contentPath, j.fileManifest, parentPkg) + return discoverPkgsFromZip(j.location, j.archivePath, j.contentPath, j.fileManifest, parentPkg, j.cfg) } // discoverPkgsFromZip finds Java archives within Java archives, returning all listed Java packages found and // associating each discovered package to the given parent package. -func discoverPkgsFromZip(location file.Location, archivePath, contentPath string, fileManifest intFile.ZipFileManifest, parentPkg *pkg.Package) ([]pkg.Package, []artifact.Relationship, error) { +func discoverPkgsFromZip(location file.Location, archivePath, contentPath string, fileManifest intFile.ZipFileManifest, parentPkg *pkg.Package, cfg Config) ([]pkg.Package, []artifact.Relationship, error) { // search and parse pom.properties files & fetch the contents - openers, err := intFile.ExtractFromZipToUniqueTempFile(archivePath, contentPath, fileManifest.GlobMatch(archiveFormatGlobs...)...) + openers, err := intFile.ExtractFromZipToUniqueTempFile(archivePath, contentPath, fileManifest.GlobMatch(false, archiveFormatGlobs...)...) 
if err != nil { return nil, nil, fmt.Errorf("unable to extract files from zip: %w", err) } - return discoverPkgsFromOpeners(location, openers, parentPkg) + return discoverPkgsFromOpeners(location, openers, parentPkg, cfg) } // discoverPkgsFromOpeners finds Java archives within the given files and associates them with the given parent package. -func discoverPkgsFromOpeners(location file.Location, openers map[string]intFile.Opener, parentPkg *pkg.Package) ([]pkg.Package, []artifact.Relationship, error) { +func discoverPkgsFromOpeners(location file.Location, openers map[string]intFile.Opener, parentPkg *pkg.Package, cfg Config) ([]pkg.Package, []artifact.Relationship, error) { var pkgs []pkg.Package var relationships []artifact.Relationship for pathWithinArchive, archiveOpener := range openers { - nestedPkgs, nestedRelationships, err := discoverPkgsFromOpener(location, pathWithinArchive, archiveOpener) + nestedPkgs, nestedRelationships, err := discoverPkgsFromOpener(location, pathWithinArchive, archiveOpener, cfg) if err != nil { - log.WithFields("location", location.AccessPath()).Warnf("unable to discover java packages from opener: %+v", err) + log.WithFields("location", location.Path()).Warnf("unable to discover java packages from opener: %+v", err) continue } // attach the parent package to all discovered packages that are not already associated with a java archive for _, p := range nestedPkgs { - if metadata, ok := p.Metadata.(pkg.JavaMetadata); ok { + if metadata, ok := p.Metadata.(pkg.JavaArchive); ok { if metadata.Parent == nil { metadata.Parent = parentPkg } @@ -416,7 +582,7 @@ func discoverPkgsFromOpeners(location file.Location, openers map[string]intFile. } // discoverPkgsFromOpener finds Java archives within the given file. -func discoverPkgsFromOpener(location file.Location, pathWithinArchive string, archiveOpener intFile.Opener) ([]pkg.Package, []artifact.Relationship, error) { +func discoverPkgsFromOpener(location file.Location, pathWithinArchive string, archiveOpener intFile.Opener, cfg Config) ([]pkg.Package, []artifact.Relationship, error) { archiveReadCloser, err := archiveOpener.Open() if err != nil { return nil, nil, fmt.Errorf("unable to open archived file from tempdir: %w", err) @@ -427,10 +593,11 @@ func discoverPkgsFromOpener(location file.Location, pathWithinArchive string, ar } }() - nestedPath := fmt.Sprintf("%s:%s", location.AccessPath(), pathWithinArchive) + nestedPath := fmt.Sprintf("%s:%s", location.Path(), pathWithinArchive) nestedLocation := file.NewLocationFromCoordinates(location.Coordinates) - nestedLocation.VirtualPath = nestedPath - nestedPkgs, nestedRelationships, err := parseJavaArchive(nil, nil, file.LocationReadCloser{ + nestedLocation.AccessPath = nestedPath + gap := newGenericArchiveParserAdapter(cfg) + nestedPkgs, nestedRelationships, err := gap.parseJavaArchive(nil, nil, file.LocationReadCloser{ Location: nestedLocation, ReadCloser: archiveReadCloser, }) @@ -441,17 +608,17 @@ func discoverPkgsFromOpener(location file.Location, pathWithinArchive string, ar return nestedPkgs, nestedRelationships, nil } -func pomPropertiesByParentPath(archivePath string, location file.Location, extractPaths []string) (map[string]pkg.PomProperties, error) { +func pomPropertiesByParentPath(archivePath string, location file.Location, extractPaths []string) (map[string]pkg.JavaPomProperties, error) { contentsOfMavenPropertiesFiles, err := intFile.ContentsFromZip(archivePath, extractPaths...) 
if err != nil { return nil, fmt.Errorf("unable to extract maven files: %w", err) } - propertiesByParentPath := make(map[string]pkg.PomProperties) + propertiesByParentPath := make(map[string]pkg.JavaPomProperties) for filePath, fileContents := range contentsOfMavenPropertiesFiles { pomProperties, err := parsePomProperties(filePath, strings.NewReader(fileContents)) if err != nil { - log.WithFields("contents-path", filePath, "location", location.AccessPath()).Warnf("failed to parse pom.properties: %+v", err) + log.WithFields("contents-path", filePath, "location", location.Path()).Warnf("failed to parse pom.properties: %+v", err) continue } @@ -470,18 +637,18 @@ func pomPropertiesByParentPath(archivePath string, location file.Location, extra return propertiesByParentPath, nil } -func pomProjectByParentPath(archivePath string, location file.Location, extractPaths []string) (map[string]parsedPomProject, error) { +func pomProjectByParentPath(archivePath string, location file.Location, extractPaths []string) (map[string]*parsedPomProject, error) { contentsOfMavenProjectFiles, err := intFile.ContentsFromZip(archivePath, extractPaths...) if err != nil { return nil, fmt.Errorf("unable to extract maven files: %w", err) } - projectByParentPath := make(map[string]parsedPomProject) + projectByParentPath := make(map[string]*parsedPomProject) for filePath, fileContents := range contentsOfMavenProjectFiles { // TODO: when we support locations of paths within archives we should start passing the specific pom.xml location object instead of the top jar pomProject, err := parsePomXMLProject(filePath, strings.NewReader(fileContents), location) if err != nil { - log.WithFields("contents-path", filePath, "location", location.AccessPath()).Warnf("failed to parse pom.xml: %+v", err) + log.WithFields("contents-path", filePath, "location", location.Path()).Warnf("failed to parse pom.xml: %+v", err) continue } @@ -489,23 +656,24 @@ func pomProjectByParentPath(archivePath string, location file.Location, extractP continue } - if pomProject.Version == "" || pomProject.ArtifactID == "" { + // If we don't have a version, then maybe the parent pom has it... + if (pomProject.Parent == nil && pomProject.Version == "") || pomProject.ArtifactID == "" { // TODO: if there is no parentPkg (no java manifest) one of these poms could be the parent. We should discover the right parent and attach the correct info accordingly to each discovered package continue } - projectByParentPath[path.Dir(filePath)] = *pomProject + projectByParentPath[path.Dir(filePath)] = pomProject } return projectByParentPath, nil } // newPackageFromMavenData processes a single Maven POM properties for a given parent package, returning all listed Java packages found and // associating each discovered package to the given parent package. Note the pom.xml is optional, the pom.properties is not. 
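As the doc comment above notes, pom.properties is the required input for newPackageFromMavenData. For reference, the file is a small key=value payload under META-INF/maven/<groupId>/<artifactId>/ inside the jar; a hedged sketch (not syft's parsePomProperties) of pulling out the three fields that populate pkg.JavaPomProperties:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// illustrative pom.properties content as written by Maven into a jar
	const props = `#Generated by Maven
version=2.15.2
groupId=com.fasterxml.jackson.core
artifactId=jackson-databind
`
	kv := map[string]string{}
	sc := bufio.NewScanner(strings.NewReader(props))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blanks and comments
		}
		if k, v, ok := strings.Cut(line, "="); ok {
			kv[strings.TrimSpace(k)] = strings.TrimSpace(v)
		}
	}
	// these three keys are what end up as GroupID, ArtifactID and Version
	fmt.Println(kv["groupId"], kv["artifactId"], kv["version"])
}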
-func newPackageFromMavenData(pomProperties pkg.PomProperties, parsedPomProject *parsedPomProject, parentPkg *pkg.Package, location file.Location) *pkg.Package { +func newPackageFromMavenData(pomProperties pkg.JavaPomProperties, parsedPomProject *parsedPomProject, parentPkg *pkg.Package, location file.Location, cfg Config) *pkg.Package { // keep the artifact name within the virtual path if this package does not match the parent package vPathSuffix := "" groupID := "" - if parentMetadata, ok := parentPkg.Metadata.(pkg.JavaMetadata); ok { + if parentMetadata, ok := parentPkg.Metadata.(pkg.JavaArchive); ok { groupID = groupIDFromJavaMetadata(parentPkg.Name, parentMetadata) } @@ -521,12 +689,15 @@ func newPackageFromMavenData(pomProperties pkg.PomProperties, parsedPomProject * // https://github.com/anchore/syft/issues/1944 vPathSuffix += ":" + pomProperties.GroupID + ":" + pomProperties.ArtifactID } - virtualPath := location.AccessPath() + vPathSuffix + virtualPath := location.Path() + vPathSuffix - var pkgPomProject *pkg.PomProject + var pkgPomProject *pkg.JavaPomProject licenses := make([]pkg.License, 0) if parsedPomProject != nil { - pkgPomProject = parsedPomProject.PomProject + if cfg.UseNetwork { + findPomLicenses(parsedPomProject, cfg) + } + pkgPomProject = parsedPomProject.JavaPomProject licenses = append(licenses, parsedPomProject.Licenses...) } @@ -536,11 +707,10 @@ func newPackageFromMavenData(pomProperties pkg.PomProperties, parsedPomProject * Locations: file.NewLocationSet( location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), - Licenses: pkg.NewLicenseSet(licenses...), - Language: pkg.Java, - Type: pomProperties.PkgTypeIndicated(), - MetadataType: pkg.JavaMetadataType, - Metadata: pkg.JavaMetadata{ + Licenses: pkg.NewLicenseSet(licenses...), + Language: pkg.Java, + Type: pomProperties.PkgTypeIndicated(), + Metadata: pkg.JavaArchive{ VirtualPath: virtualPath, PomProperties: &pomProperties, PomProject: pkgPomProject, @@ -557,8 +727,8 @@ func newPackageFromMavenData(pomProperties pkg.PomProperties, parsedPomProject * } func packageIdentitiesMatch(p pkg.Package, parentPkg *pkg.Package) bool { - metadata, ok := p.Metadata.(pkg.JavaMetadata) - parentMetadata, parentOk := parentPkg.Metadata.(pkg.JavaMetadata) + metadata, ok := p.Metadata.(pkg.JavaArchive) + parentMetadata, parentOk := parentPkg.Metadata.(pkg.JavaArchive) if !ok || !parentOk { switch { case !ok: @@ -608,14 +778,14 @@ func updateParentPackage(p pkg.Package, parentPkg *pkg.Package) { // we may have learned more about the type via data in the pom properties parentPkg.Type = p.Type - metadata, ok := p.Metadata.(pkg.JavaMetadata) + metadata, ok := p.Metadata.(pkg.JavaArchive) if !ok { return } pomPropertiesCopy := *metadata.PomProperties // keep the pom properties, but don't overwrite existing pom properties - parentMetadata, ok := parentPkg.Metadata.(pkg.JavaMetadata) + parentMetadata, ok := parentPkg.Metadata.(pkg.JavaArchive) if ok && parentMetadata.PomProperties == nil { parentMetadata.PomProperties = &pomPropertiesCopy parentPkg.Metadata = parentMetadata diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/cataloger.go index be880a75..2b543bf8 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/cataloger.go @@ -1,5 +1,5 @@ /* -Package java provides a concrete Cataloger implementation for Java archives (jar, war, ear, 
par, sar, jpi, hpi, and native-image formats). +Package java provides a concrete Cataloger implementation for packages relating to the Java language ecosystem. */ package java @@ -7,35 +7,37 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -// NewJavaCataloger returns a new Java archive cataloger object. -func NewJavaCataloger(cfg Config) *generic.Cataloger { - c := generic.NewCataloger("java-cataloger"). - WithParserByGlobs(parseJavaArchive, archiveFormatGlobs...) +// NewArchiveCataloger returns a new Java archive cataloger object for detecting packages with archives (jar, war, ear, par, sar, jpi, hpi, and native-image formats) +func NewArchiveCataloger(cfg Config) *generic.Cataloger { + gap := newGenericArchiveParserAdapter(cfg) + + c := generic.NewCataloger("java-archive-cataloger"). + WithParserByGlobs(gap.parseJavaArchive, archiveFormatGlobs...) if cfg.SearchIndexedArchives { // java archives wrapped within zip files - c.WithParserByGlobs(parseZipWrappedJavaArchive, genericZipGlobs...) + gzp := newGenericZipWrappedJavaArchiveParser(cfg) + c.WithParserByGlobs(gzp.parseZipWrappedJavaArchive, genericZipGlobs...) } if cfg.SearchUnindexedArchives { // java archives wrapped within tar files - c.WithParserByGlobs(parseTarWrappedJavaArchive, genericTarGlobs...) + gtp := newGenericTarWrappedJavaArchiveParser(cfg) + c.WithParserByGlobs(gtp.parseTarWrappedJavaArchive, genericTarGlobs...) } return c } -// NewJavaPomCataloger returns a cataloger capable of parsing -// dependencies from a pom.xml file. +// NewPomCataloger returns a cataloger capable of parsing dependencies from a pom.xml file. // Pom files list dependencies that maybe not be locally installed yet. -func NewJavaPomCataloger() *generic.Cataloger { +func NewPomCataloger() *generic.Cataloger { return generic.NewCataloger("java-pom-cataloger"). WithParserByGlobs(parserPomXML, "**/pom.xml") } -// NewJavaGradleLockfileCataloger returns a cataloger capable of parsing -// dependencies from a gradle.lockfile file. -// older versions of lockfiles aren't supported yet -func NewJavaGradleLockfileCataloger() *generic.Cataloger { +// NewGradleLockfileCataloger returns a cataloger capable of parsing dependencies from a gradle.lockfile file. +// Note: Older versions of lockfiles aren't supported yet +func NewGradleLockfileCataloger() *generic.Cataloger { return generic.NewCataloger("java-gradle-lockfile-cataloger"). 
WithParserByGlobs(parseGradleLockfile, gradleLockfileGlob) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/config.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/config.go index 84b940ac..bfa526f9 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/config.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/config.go @@ -3,4 +3,7 @@ package java type Config struct { SearchUnindexedArchives bool SearchIndexedArchives bool + UseNetwork bool + MavenBaseURL string + MaxParentRecursiveDepth int } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/graalvm_native_image_cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/graalvm_native_image_cataloger.go index db462ea8..d2d441ef 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/graalvm_native_image_cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/graalvm_native_image_cataloger.go @@ -123,14 +123,13 @@ func getPackage(component nativeImageComponent) pkg.Package { cpes = append(cpes, c) } return pkg.Package{ - Name: component.Name, - Version: component.Version, - Language: pkg.Java, - Type: pkg.GraalVMNativeImagePkg, - MetadataType: pkg.JavaMetadataType, - FoundBy: nativeImageCatalogerName, - Metadata: pkg.JavaMetadata{ - PomProperties: &pkg.PomProperties{ + Name: component.Name, + Version: component.Version, + Language: pkg.Java, + Type: pkg.GraalVMNativeImagePkg, + FoundBy: nativeImageCatalogerName, + Metadata: pkg.JavaArchive{ + PomProperties: &pkg.JavaPomProperties{ GroupID: component.Group, }, }, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/options.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/options.go new file mode 100644 index 00000000..3c580a2b --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/options.go @@ -0,0 +1,36 @@ +package java + +const MavenBaseURL = "https://repo1.maven.org/maven2" + +type CatalogerOpts struct { + UseNetwork bool + MavenURL string + MaxParentRecursiveDepth int +} + +func (j CatalogerOpts) WithUseNetwork(input bool) CatalogerOpts { + j.UseNetwork = input + return j +} + +func (j CatalogerOpts) WithMavenURL(input string) CatalogerOpts { + if input != "" { + j.MavenURL = input + } + return j +} + +func (j CatalogerOpts) WithMaxParentRecursiveDepth(input int) CatalogerOpts { + if input > 0 { + j.MaxParentRecursiveDepth = input + } + return j +} + +func DefaultCatalogerOpts() CatalogerOpts { + return CatalogerOpts{ + UseNetwork: false, + MavenURL: MavenBaseURL, + MaxParentRecursiveDepth: 5, + } +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go index df1baf79..012037d2 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go @@ -9,7 +9,7 @@ import ( ) // PackageURL returns the PURL for the specific java package (see https://github.com/package-url/purl-spec) -func packageURL(name, version string, metadata pkg.JavaMetadata) string { +func packageURL(name, version string, metadata pkg.JavaArchive) string { var groupID = name if gID := groupIDFromJavaMetadata(name, metadata); gID != "" { @@ -32,7 +32,7 @@ func packageURL(name, version string, metadata pkg.JavaMetadata) string { // 2. The group ID from the POM project // 3. The group ID from a select map of known group IDs // 4. 
The group ID from the Java manifest -func groupIDFromJavaMetadata(pkgName string, metadata pkg.JavaMetadata) (groupID string) { +func groupIDFromJavaMetadata(pkgName string, metadata pkg.JavaArchive) (groupID string) { if groupID = groupIDFromPomProperties(metadata.PomProperties); groupID != "" { return groupID } @@ -79,7 +79,7 @@ func groupIDFromJavaManifest(manifest *pkg.JavaManifest) (groupID string) { return groupID } -func groupIDFromPomProperties(properties *pkg.PomProperties) (groupID string) { +func groupIDFromPomProperties(properties *pkg.JavaPomProperties) (groupID string) { if properties == nil { return groupID } @@ -97,7 +97,7 @@ func groupIDFromPomProperties(properties *pkg.PomProperties) (groupID string) { return groupID } -func groupIDFromPomProject(project *pkg.PomProject) (groupID string) { +func groupIDFromPomProject(project *pkg.JavaPomProject) (groupID string) { if project == nil { return groupID } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go index 3506b44b..82a473a7 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go @@ -46,25 +46,28 @@ func parseGradleLockfile(_ file.Resolver, _ *generic.Environment, reader file.Lo dependencies = append(dependencies, dep) } } + // map the dependencies for _, dep := range dependencies { + archive := pkg.JavaArchive{ + PomProject: &pkg.JavaPomProject{ + GroupID: dep.Group, + ArtifactID: dep.Name, + Version: dep.Version, + Name: dep.Name, + }, + } + mappedPkg := pkg.Package{ Name: dep.Name, Version: dep.Version, Locations: file.NewLocationSet( reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), - Language: pkg.Java, - Type: pkg.JavaPkg, - MetadataType: pkg.JavaMetadataType, - Metadata: pkg.JavaMetadata{ - PomProject: &pkg.PomProject{ - GroupID: dep.Group, - ArtifactID: dep.Name, - Version: dep.Version, - Name: dep.Name, - }, - }, + Language: pkg.Java, + Type: pkg.JavaPkg, + PURL: packageURL(dep.Name, dep.Version, archive), + Metadata: archive, } mappedPkg.SetID() pkgs = append(pkgs, mappedPkg) diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_java_manifest.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_java_manifest.go index 688e59de..14a3a363 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_java_manifest.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_java_manifest.go @@ -32,7 +32,7 @@ func parseJavaManifest(path string, reader io.Reader) (*pkg.JavaManifest, error) line := scanner.Text() // empty lines denote section separators - if strings.TrimSpace(line) == "" { + if line == "" { // we don't want to allocate a new section map that won't necessarily be used, do that once there is // a non-empty line to process @@ -46,7 +46,7 @@ func parseJavaManifest(path string, reader io.Reader) (*pkg.JavaManifest, error) // this is a continuation if lastKey == "" { - log.Warnf("java manifest %q: found continuation with no previous key: %q", path, line) + log.Debugf("java manifest %q: found continuation with no previous key: %q", path, line) continue } @@ -58,7 +58,7 @@ func parseJavaManifest(path string, reader io.Reader) (*pkg.JavaManifest, error) // this is a new key-value pair idx := strings.Index(line, ":") if idx == -1 { - log.Warnf("java manifest %q: unable to 
split java manifest key-value pairs: %q", path, line) + log.Debugf("java manifest %q: unable to split java manifest key-value pairs: %q", path, line) continue } @@ -95,7 +95,7 @@ func parseJavaManifest(path string, reader io.Reader) (*pkg.JavaManifest, error) // per the manifest spec (https://docs.oracle.com/en/java/javase/11/docs/specs/jar/jar.html#jar-manifest) // this should never happen. If it does, we want to know about it, but not necessarily stop // cataloging entirely... for this reason we only log. - log.Warnf("java manifest section found without a name: %s", path) + log.Debugf("java manifest section found without a name: %s", path) name = strconv.Itoa(i) } else { delete(s, "Name") @@ -108,7 +108,7 @@ func parseJavaManifest(path string, reader io.Reader) (*pkg.JavaManifest, error) return &manifest, nil } -func selectName(manifest *pkg.JavaManifest, filenameObj archiveFilename) string { +func extractNameFromApacheMavenBundlePlugin(manifest *pkg.JavaManifest) string { // special case: from https://svn.apache.org/repos/asf/felix/releases/maven-bundle-plugin-1.2.0/doc/maven-bundle-plugin-bnd.html // " is assumed to be "${groupId}.${artifactId}"." // @@ -127,10 +127,17 @@ func selectName(manifest *pkg.JavaManifest, filenameObj archiveFilename) string // if manifest != nil { if strings.Contains(manifest.Main["Created-By"], "Apache Maven Bundle Plugin") { - if v := manifest.Main["Bundle-SymbolicName"]; v != "" { + if symbolicName := manifest.Main["Bundle-SymbolicName"]; symbolicName != "" { + // It is possible that `Bundle-SymbolicName` is just the groupID (like in the case of + // https://repo1.maven.org/maven2/com/google/oauth-client/google-oauth-client/1.25.0/google-oauth-client-1.25.0.jar), + // so if `Implementation-Vendor-Id` is equal to `Bundle-SymbolicName`, bail on this logic + if vendorID := manifest.Main["Implementation-Vendor-Id"]; vendorID != "" && vendorID == symbolicName { + return "" + } + // the problem with this approach is that we don't have a strong indication of the artifactId // not having a "." in it. However, by convention it is unlikely that an artifactId would have a ".". - fields := strings.Split(v, ".") + fields := strings.Split(symbolicName, ".") // grab the last field, this is the artifactId. Note: because of [3] we do not know if this value is // correct. 
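To make the Bundle-SymbolicName heuristic above concrete, a small sketch with hypothetical manifest values (the surrounding comment's caveats about BND rewriting the artifact id still apply):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// hypothetical values from a MANIFEST.MF written by the Apache Maven Bundle Plugin
	symbolicName := "com.fasterxml.jackson.core.jackson-databind"
	vendorID := "com.fasterxml.jackson.core"

	if vendorID == symbolicName {
		// Bundle-SymbolicName is just the group id; nothing to recover here
		fmt.Println("no name guess")
		return
	}
	fields := strings.Split(symbolicName, ".")
	fmt.Println("guessed artifact id:", fields[len(fields)-1]) // "jackson-databind"
}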
That is, a group id of "commons-logging" may have caused BND to swap out the reference to @@ -143,6 +150,15 @@ func selectName(manifest *pkg.JavaManifest, filenameObj archiveFilename) string } } + return "" +} + +func selectName(manifest *pkg.JavaManifest, filenameObj archiveFilename) string { + name := extractNameFromApacheMavenBundlePlugin(manifest) + if name != "" { + return name + } + // the filename tends to be the next-best reference for the package name if filenameObj.name != "" { if strings.Contains(filenameObj.name, ".") { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_properties.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_properties.go index 248e0de9..460a2df9 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_properties.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_properties.go @@ -13,8 +13,8 @@ import ( const pomPropertiesGlob = "*pom.properties" -func parsePomProperties(path string, reader io.Reader) (*pkg.PomProperties, error) { - var props pkg.PomProperties +func parsePomProperties(path string, reader io.Reader) (*pkg.JavaPomProperties, error) { + var props pkg.JavaPomProperties propMap := make(map[string]string) scanner := bufio.NewScanner(reader) for scanner.Scan() { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go index eb0f9d9b..75376521 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go @@ -83,7 +83,7 @@ func newPomProject(path string, p gopom.Project, location file.Location) *parsed log.WithFields("path", path, "artifactID", artifactID, "name", name, "projectURL", projectURL).Trace("parsing pom.xml") return &parsedPomProject{ - PomProject: &pkg.PomProject{ + JavaPomProject: &pkg.JavaPomProject{ Path: path, Parent: pomParent(p, p.Parent), GroupID: resolveProperty(p, p.GroupID, "groupId"), @@ -98,8 +98,8 @@ func newPomProject(path string, p gopom.Project, location file.Location) *parsed } func newPackageFromPom(pom gopom.Project, dep gopom.Dependency, locations ...file.Location) pkg.Package { - m := pkg.JavaMetadata{ - PomProperties: &pkg.PomProperties{ + m := pkg.JavaArchive{ + PomProperties: &pkg.JavaPomProperties{ GroupID: resolveProperty(pom, dep.GroupID, "groupId"), ArtifactID: resolveProperty(pom, dep.ArtifactID, "artifactId"), Scope: resolveProperty(pom, dep.Scope, "scope"), @@ -110,14 +110,13 @@ func newPackageFromPom(pom gopom.Project, dep gopom.Dependency, locations ...fil version := resolveProperty(pom, dep.Version, "version") p := pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(name, version, m), - Language: pkg.Java, - Type: pkg.JavaPkg, // TODO: should we differentiate between packages from jar/war/zip versus packages from a pom.xml that were not installed yet? - MetadataType: pkg.JavaMetadataType, - Metadata: m, + Name: name, + Version: version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(name, version, m), + Language: pkg.Java, + Type: pkg.JavaPkg, // TODO: should we differentiate between packages from jar/war/zip versus packages from a pom.xml that were not installed yet? 
+ Metadata: m, } p.SetID() @@ -169,13 +168,13 @@ func getUtf8Reader(content io.Reader) (io.Reader, error) { return inputReader, nil } -func pomParent(pom gopom.Project, parent *gopom.Parent) (result *pkg.PomParent) { +func pomParent(pom gopom.Project, parent *gopom.Parent) (result *pkg.JavaPomParent) { if parent == nil { return nil } artifactID := safeString(parent.ArtifactID) - result = &pkg.PomParent{ + result = &pkg.JavaPomParent{ GroupID: resolveProperty(pom, parent.GroupID, "groupId"), ArtifactID: artifactID, Version: resolveProperty(pom, parent.Version, "version"), diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/tar_wrapped_archive_parser.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/tar_wrapped_archive_parser.go index 05ab6dd2..195940d8 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/tar_wrapped_archive_parser.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/tar_wrapped_archive_parser.go @@ -45,8 +45,19 @@ var genericTarGlobs = []string{ // note: for compressed tars this is an extremely expensive operation and can lead to performance degradation. This is // due to the fact that there is no central directory header (say as in zip), which means that in order to get // a file listing within the archive you must decompress the entire archive and seek through all of the entries. -func parseTarWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { - contentPath, archivePath, cleanupFn, err := saveArchiveToTmp(reader.AccessPath(), reader) + +type genericTarWrappedJavaArchiveParser struct { + cfg Config +} + +func newGenericTarWrappedJavaArchiveParser(cfg Config) genericTarWrappedJavaArchiveParser { + return genericTarWrappedJavaArchiveParser{ + cfg: cfg, + } +} + +func (gtp genericTarWrappedJavaArchiveParser) parseTarWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + contentPath, archivePath, cleanupFn, err := saveArchiveToTmp(reader.Path(), reader) // note: even on error, we should always run cleanup functions defer cleanupFn() if err != nil { @@ -54,14 +65,14 @@ func parseTarWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader } // look for java archives within the tar archive - return discoverPkgsFromTar(reader.Location, archivePath, contentPath) + return discoverPkgsFromTar(reader.Location, archivePath, contentPath, gtp.cfg) } -func discoverPkgsFromTar(location file.Location, archivePath, contentPath string) ([]pkg.Package, []artifact.Relationship, error) { +func discoverPkgsFromTar(location file.Location, archivePath, contentPath string, cfg Config) ([]pkg.Package, []artifact.Relationship, error) { openers, err := intFile.ExtractGlobsFromTarToUniqueTempFile(archivePath, contentPath, archiveFormatGlobs...) 
if err != nil { return nil, nil, fmt.Errorf("unable to extract files from tar: %w", err) } - return discoverPkgsFromOpeners(location, openers, nil) + return discoverPkgsFromOpeners(location, openers, nil, cfg) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/zip_wrapped_archive_parser.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/zip_wrapped_archive_parser.go index 930427f3..763c1724 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/zip_wrapped_archive_parser.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/zip_wrapped_archive_parser.go @@ -17,8 +17,19 @@ var genericZipGlobs = []string{ // TODO: when the generic archive cataloger is implemented, this should be removed (https://github.com/anchore/syft/issues/246) // parseZipWrappedJavaArchive is a parser function for java archive contents contained within arbitrary zip files. -func parseZipWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { - contentPath, archivePath, cleanupFn, err := saveArchiveToTmp(reader.AccessPath(), reader) + +type genericZipWrappedJavaArchiveParser struct { + cfg Config +} + +func newGenericZipWrappedJavaArchiveParser(cfg Config) genericZipWrappedJavaArchiveParser { + return genericZipWrappedJavaArchiveParser{ + cfg: cfg, + } +} + +func (gzp genericZipWrappedJavaArchiveParser) parseZipWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + contentPath, archivePath, cleanupFn, err := saveArchiveToTmp(reader.Path(), reader) // note: even on error, we should always run cleanup functions defer cleanupFn() if err != nil { @@ -35,5 +46,5 @@ func parseZipWrappedJavaArchive(_ file.Resolver, _ *generic.Environment, reader } // look for java archives within the zip archive - return discoverPkgsFromZip(reader.Location, archivePath, contentPath, fileManifest, nil) + return discoverPkgsFromZip(reader.Location, archivePath, contentPath, fileManifest, nil, gzp.cfg) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/cataloger.go index 2109eb19..56127e37 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/cataloger.go @@ -1,5 +1,5 @@ /* -Package javascript provides a concrete Cataloger implementation for JavaScript ecosystem files (yarn and npm). +Package javascript provides a concrete Cataloger implementation for packages relating to the JavaScript language ecosystem. */ package javascript @@ -7,13 +7,13 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -// NewPackageCataloger returns a new JavaScript cataloger object based on detection of npm based packages. +// NewPackageCataloger returns a new cataloger object for NPM. func NewPackageCataloger() *generic.Cataloger { return generic.NewCataloger("javascript-package-cataloger"). WithParserByGlobs(parsePackageJSON, "**/package.json") } -// NewLockCataloger returns a new JavaScript cataloger object based on detection of lock files. +// NewLockCataloger returns a new cataloger object for NPM (and NPM-adjacent, such as yarn) lock files. func NewLockCataloger() *generic.Cataloger { return generic.NewCataloger("javascript-lock-cataloger"). WithParserByGlobs(parsePackageLock, "**/package-lock.json"). 
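A hedged usage sketch for the JavaScript constructors documented above; the import path matches this vendor tree, and the glob notes in the comments are illustrative rather than exhaustive:

package main

import (
	"fmt"

	"github.com/anchore/syft/syft/pkg/cataloger/javascript"
)

func main() {
	// both constructors return *generic.Cataloger values that syft drives
	// against a file.Resolver; here we only print their names
	pkgCataloger := javascript.NewPackageCataloger() // keyed to **/package.json
	lockCataloger := javascript.NewLockCataloger()   // package-lock.json, yarn.lock, and related lock files
	fmt.Println(pkgCataloger.Name(), lockCataloger.Name())
}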
diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/package.go index 4eaea055..9a1331ac 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/package.go @@ -20,15 +20,14 @@ func newPackageJSONPackage(u packageJSON, indexLocation file.Location) pkg.Packa license := pkg.NewLicensesFromLocation(indexLocation, licenseCandidates...) p := pkg.Package{ - Name: u.Name, - Version: u.Version, - PURL: packageURL(u.Name, u.Version), - Locations: file.NewLocationSet(indexLocation), - Language: pkg.JavaScript, - Licenses: pkg.NewLicenseSet(license...), - Type: pkg.NpmPkg, - MetadataType: pkg.NpmPackageJSONMetadataType, - Metadata: pkg.NpmPackageJSONMetadata{ + Name: u.Name, + Version: u.Version, + PURL: packageURL(u.Name, u.Version), + Locations: file.NewLocationSet(indexLocation), + Language: pkg.JavaScript, + Licenses: pkg.NewLicenseSet(license...), + Type: pkg.NpmPkg, + Metadata: pkg.NpmPackage{ Name: u.Name, Version: u.Version, Description: u.Description, @@ -64,14 +63,13 @@ func newPackageLockV1Package(resolver file.Resolver, location file.Location, nam resolver, location, pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - PURL: packageURL(name, version), - Language: pkg.JavaScript, - Type: pkg.NpmPkg, - MetadataType: pkg.NpmPackageLockJSONMetadataType, - Metadata: pkg.NpmPackageLockJSONMetadata{Resolved: u.Resolved, Integrity: u.Integrity}, + Name: name, + Version: version, + Locations: file.NewLocationSet(location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + PURL: packageURL(name, version), + Language: pkg.JavaScript, + Type: pkg.NpmPkg, + Metadata: pkg.NpmPackageLockEntry{Resolved: u.Resolved, Integrity: u.Integrity}, }, ) } @@ -81,15 +79,14 @@ func newPackageLockV2Package(resolver file.Resolver, location file.Location, nam resolver, location, pkg.Package{ - Name: name, - Version: u.Version, - Locations: file.NewLocationSet(location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(location, u.License...)...), - PURL: packageURL(name, u.Version), - Language: pkg.JavaScript, - Type: pkg.NpmPkg, - MetadataType: pkg.NpmPackageLockJSONMetadataType, - Metadata: pkg.NpmPackageLockJSONMetadata{Resolved: u.Resolved, Integrity: u.Integrity}, + Name: name, + Version: u.Version, + Locations: file.NewLocationSet(location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(location, u.License...)...), + PURL: packageURL(name, u.Version), + Language: pkg.JavaScript, + Type: pkg.NpmPkg, + Metadata: pkg.NpmPackageLockEntry{Resolved: u.Resolved, Integrity: u.Integrity}, }, ) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_json.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_json.go index 0c05aedc..d0e7edbc 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_json.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_json.go @@ -64,7 +64,7 @@ func parsePackageJSON(_ file.Resolver, _ *generic.Environment, reader file.Locat } if !p.hasNameAndVersionValues() { - 
log.Debugf("encountered package.json file without a name and/or version field, ignoring (path=%q)", reader.AccessPath()) + log.Debugf("encountered package.json file without a name and/or version field, ignoring (path=%q)", reader.Path()) return nil, nil, nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_lock.go index 91663b1b..45c5a0c1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_package_lock.go @@ -47,7 +47,7 @@ type packageLockLicense []string func parsePackageLock(resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { // in the case we find package-lock.json files in the node_modules directories, skip those // as the whole purpose of the lock file is for the specific dependencies of the root project - if pathContainsNodeModulesDirectory(reader.AccessPath()) { + if pathContainsNodeModulesDirectory(reader.Path()) { return nil, nil, nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_yarn_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_yarn_lock.go index e5fecff8..d42490ed 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_yarn_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/javascript/parse_yarn_lock.go @@ -46,7 +46,7 @@ const ( func parseYarnLock(resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { // in the case we find yarn.lock files in the node_modules directories, skip those // as the whole purpose of the lock file is for the specific dependencies of the project - if pathContainsNodeModulesDirectory(reader.AccessPath()) { + if pathContainsNodeModulesDirectory(reader.Path()) { return nil, nil, nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/cataloger.go index 67c5bb5b..e5e187d4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/cataloger.go @@ -91,7 +91,7 @@ func createKernelToModuleRelationships(kernelPackages, modulePackages []pkg.Pack modulesByKernelVersion := make(map[string][]*pkg.Package) for idx, p := range modulePackages { - m, ok := p.Metadata.(pkg.LinuxKernelModuleMetadata) + m, ok := p.Metadata.(pkg.LinuxKernelModule) if !ok { log.Debug("linux-kernel-module package found without metadata: %s@%s", p.Name, p.Version) continue diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/package.go index 92dcb5ef..2d3123ff 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/package.go @@ -10,15 +10,14 @@ import ( const linuxKernelPackageName = "linux-kernel" -func newLinuxKernelPackage(metadata pkg.LinuxKernelMetadata, archiveLocation file.Location) pkg.Package { +func newLinuxKernelPackage(metadata pkg.LinuxKernel, archiveLocation file.Location) pkg.Package { p := pkg.Package{ - Name: linuxKernelPackageName, - Version: metadata.Version, - Locations: 
file.NewLocationSet(archiveLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - PURL: packageURL(linuxKernelPackageName, metadata.Version), - Type: pkg.LinuxKernelPkg, - MetadataType: pkg.LinuxKernelMetadataType, - Metadata: metadata, + Name: linuxKernelPackageName, + Version: metadata.Version, + Locations: file.NewLocationSet(archiveLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + PURL: packageURL(linuxKernelPackageName, metadata.Version), + Type: pkg.LinuxKernelPkg, + Metadata: metadata, } p.SetID() @@ -26,16 +25,15 @@ func newLinuxKernelPackage(metadata pkg.LinuxKernelMetadata, archiveLocation fil return p } -func newLinuxKernelModulePackage(metadata pkg.LinuxKernelModuleMetadata, kmLocation file.Location) pkg.Package { +func newLinuxKernelModulePackage(metadata pkg.LinuxKernelModule, kmLocation file.Location) pkg.Package { p := pkg.Package{ - Name: metadata.Name, - Version: metadata.Version, - Locations: file.NewLocationSet(kmLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(kmLocation, metadata.License)...), - PURL: packageURL(metadata.Name, metadata.Version), - Type: pkg.LinuxKernelModulePkg, - MetadataType: pkg.LinuxKernelModuleMetadataType, - Metadata: metadata, + Name: metadata.Name, + Version: metadata.Version, + Locations: file.NewLocationSet(kmLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(kmLocation, metadata.License)...), + PURL: packageURL(metadata.Name, metadata.Version), + Type: pkg.LinuxKernelModulePkg, + Metadata: metadata, } p.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_file.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_file.go index 54c26eb4..4d4f1669 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_file.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_file.go @@ -42,7 +42,7 @@ func parseLinuxKernelFile(_ file.Resolver, _ *generic.Environment, reader file.L }, nil, nil } -func parseLinuxKernelMetadata(magicType []string) (p pkg.LinuxKernelMetadata) { +func parseLinuxKernelMetadata(magicType []string) (p pkg.LinuxKernel) { // Linux kernel x86 boot executable bzImage, // version 5.10.121-linuxkit (root@buildkitsandbox) #1 SMP Fri Dec 2 10:35:42 UTC 2022, // RO-rootFS, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_module_file.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_module_file.go index 34974f62..53e91002 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_module_file.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/kernel/parse_linux_kernel_module_file.go @@ -37,7 +37,7 @@ func parseLinuxKernelModuleFile(_ file.Resolver, _ *generic.Environment, reader }, nil, nil } -func parseLinuxKernelModuleMetadata(r unionreader.UnionReader) (p *pkg.LinuxKernelModuleMetadata, err error) { +func parseLinuxKernelModuleMetadata(r unionreader.UnionReader) (p *pkg.LinuxKernelModule, err error) { // filename: /lib/modules/5.15.0-1031-aws/kernel/zfs/zzstd.ko // version: 1.4.5a // license: Dual BSD/GPL @@ -64,7 +64,7 @@ func parseLinuxKernelModuleMetadata(r unionreader.UnionReader) (p *pkg.LinuxKern // retpoline: Y // name: 8821cu // vermagic: 
5.10.121-linuxkit SMP mod_unload - p = &pkg.LinuxKernelModuleMetadata{ + p = &pkg.LinuxKernelModule{ Parameters: make(map[string]pkg.LinuxKernelModuleParameter), } f, err := elf.NewFile(r) @@ -100,7 +100,7 @@ func parseLinuxKernelModuleMetadata(r unionreader.UnionReader) (p *pkg.LinuxKern return p, nil } -func addLinuxKernelModuleEntry(k *pkg.LinuxKernelModuleMetadata, entry []byte) error { +func addLinuxKernelModuleEntry(k *pkg.LinuxKernelModule, entry []byte) error { if len(entry) == 0 { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/cataloger.go index 5d920f23..562ca732 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/cataloger.go @@ -1,3 +1,6 @@ +/* +Package nix provides a concrete Cataloger implementation for packages within the Nix packaging ecosystem. +*/ package nix import ( @@ -11,10 +14,7 @@ import ( "github.com/anchore/syft/syft/pkg" ) -const ( - catalogerName = "nix-store-cataloger" - nixStoreGlob = "**/nix/store/*" -) +const catalogerName = "nix-store-cataloger" // StoreCataloger finds package outputs installed in the Nix store location (/nix/store/*). type StoreCataloger struct{} @@ -32,7 +32,7 @@ func (c *StoreCataloger) Catalog(resolver file.Resolver) ([]pkg.Package, []artif var pkgs []pkg.Package var filesByPath = make(map[string]*file.LocationSet) for location := range resolver.AllLocations() { - matchesStorePath, err := doublestar.Match(nixStoreGlob, location.RealPath) + matchesStorePath, err := doublestar.Match("**/nix/store/*", location.RealPath) if err != nil { return nil, nil, fmt.Errorf("failed to match nix store path: %w", err) } @@ -81,7 +81,7 @@ func (c *StoreCataloger) Catalog(resolver file.Resolver) ([]pkg.Package, []artif } func appendFiles(p *pkg.Package, location ...file.Location) { - metadata, ok := p.Metadata.(pkg.NixStoreMetadata) + metadata, ok := p.Metadata.(pkg.NixStoreEntry) if !ok { log.WithFields("package", p.Name).Warn("nix package metadata missing") return diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/package.go index 090dfe13..56f8acfa 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/nix/package.go @@ -8,14 +8,13 @@ import ( func newNixStorePackage(storePath nixStorePath, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: storePath.name, - Version: storePath.version, - FoundBy: catalogerName, - Locations: file.NewLocationSet(locations...), - Type: pkg.NixPkg, - PURL: packageURL(storePath), - MetadataType: pkg.NixStoreMetadataType, - Metadata: pkg.NixStoreMetadata{ + Name: storePath.name, + Version: storePath.version, + FoundBy: catalogerName, + Locations: file.NewLocationSet(locations...), + Type: pkg.NixPkg, + PURL: packageURL(storePath), + Metadata: pkg.NixStoreEntry{ OutputHash: storePath.outputHash, Output: storePath.output, }, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/cataloger.go index 5beba45a..bbdf61a5 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/cataloger.go @@ -1,5 +1,5 @@ /* -Package php provides a concrete Cataloger implementation for PHP ecosystem files. 
+Package php provides a concrete Cataloger implementation relating to packages within the PHP language ecosystem. */ package php @@ -7,6 +7,9 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) +// Note about the distinction between composer.lock and installed.json: composer.lock and installed.json have different +// semantic meanings. The lock file represents what should be installed, whereas the installed file represents what is installed. + // NewComposerInstalledCataloger returns a new cataloger for PHP installed.json files. func NewComposerInstalledCataloger() *generic.Cataloger { return generic.NewCataloger("php-composer-installed-cataloger"). diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/package.go index 7255d58d..7e2a58ef 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/package.go @@ -8,44 +8,59 @@ import ( "github.com/anchore/syft/syft/pkg" ) -func newComposerLockPackage(m parsedData, indexLocation file.Location) pkg.Package { +func newComposerLockPackage(pd parsedLockData, indexLocation file.Location) pkg.Package { p := pkg.Package{ - Name: m.Name, - Version: m.Version, - Locations: file.NewLocationSet(indexLocation), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(indexLocation, m.License...)...), - PURL: packageURL(m), - Language: pkg.PHP, - Type: pkg.PhpComposerPkg, - MetadataType: pkg.PhpComposerJSONMetadataType, - Metadata: m.PhpComposerJSONMetadata, + Name: pd.Name, + Version: pd.Version, + Locations: file.NewLocationSet(indexLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(indexLocation, pd.License...)...), + PURL: packageURL(pd.Name, pd.Version), + Language: pkg.PHP, + Type: pkg.PhpComposerPkg, + Metadata: pd.PhpComposerLockEntry, } p.SetID() return p } -func packageURL(m parsedData) string { - var name, vendor string - fields := strings.Split(m.Name, "/") +func newComposerInstalledPackage(pd parsedInstalledData, indexLocation file.Location) pkg.Package { + p := pkg.Package{ + Name: pd.Name, + Version: pd.Version, + Locations: file.NewLocationSet(indexLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(indexLocation, pd.License...)...), + PURL: packageURL(pd.Name, pd.Version), + Language: pkg.PHP, + Type: pkg.PhpComposerPkg, + Metadata: pd.PhpComposerInstalledEntry, + } + + p.SetID() + return p +} + +func packageURL(name, version string) string { + var pkgName, vendor string + fields := strings.Split(name, "/") switch len(fields) { case 0: return "" case 1: - name = m.Name + pkgName = name case 2: vendor = fields[0] - name = fields[1] + pkgName = fields[1] default: vendor = fields[0] - name = strings.Join(fields[1:], "-") + pkgName = strings.Join(fields[1:], "-") } pURL := packageurl.NewPackageURL( packageurl.TypeComposer, vendor, - name, - m.Version, + pkgName, + version, nil, "") return pURL.ToString() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_composer_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_composer_lock.go index 836befe1..52347fc6 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_composer_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_composer_lock.go @@ -14,14 +14,14 @@ import ( 
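As a side note on the packageURL(name, version) refactor in the php package.go hunk above: a composer package name carries its vendor as a prefix, and that vendor becomes the purl namespace. A minimal sketch of the expected result for a typical two-segment name (not the vendored helper itself; it assumes the anchore/packageurl-go module already present in this vendor tree and ignores the multi-segment fallback):

package main

import (
	"fmt"
	"strings"

	"github.com/anchore/packageurl-go"
)

// composerPURL mirrors the splitting behavior of the refactored helper: the
// vendor prefix of a composer package name becomes the purl namespace.
func composerPURL(name, version string) string {
	vendor, pkgName := "", name
	if fields := strings.Split(name, "/"); len(fields) == 2 {
		vendor, pkgName = fields[0], fields[1]
	}
	return packageurl.NewPackageURL(packageurl.TypeComposer, vendor, pkgName, version, nil, "").ToString()
}

func main() {
	fmt.Println(composerPURL("symfony/console", "5.4.0")) // pkg:composer/symfony/console@5.4.0
	fmt.Println(composerPURL("monolog", "1.0.0"))         // pkg:composer/monolog@1.0.0
}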
var _ generic.Parser = parseComposerLock -type parsedData struct { +type parsedLockData struct { License []string `json:"license"` - pkg.PhpComposerJSONMetadata + pkg.PhpComposerLockEntry } type composerLock struct { - Packages []parsedData `json:"packages"` - PackageDev []parsedData `json:"packages-dev"` + Packages []parsedLockData `json:"packages"` + PackageDev []parsedLockData `json:"packages-dev"` // TODO: these are not currently included as packages in the SBOM... should they be? } // parseComposerLock is a parser function for Composer.lock contents, returning "Default" php packages discovered. @@ -36,20 +36,15 @@ func parseComposerLock(_ file.Resolver, _ *generic.Environment, reader file.Loca } else if err != nil { return nil, nil, fmt.Errorf("failed to parse composer.lock file: %w", err) } - for _, m := range lock.Packages { + for _, pd := range lock.Packages { pkgs = append( pkgs, newComposerLockPackage( - m, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + pd, + reader.Location, ), ) } - - // TODO: did we omit this on purpose? - // for _, m := range lock.PackageDev { - // pkgs = append(pkgs, newComposerLockPackage(m, reader.Location)) - //} } return pkgs, nil, nil diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_installed_json.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_installed_json.go index 060e0190..e15490f0 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_installed_json.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/php/parse_installed_json.go @@ -16,19 +16,24 @@ var _ generic.Parser = parseComposerLock // Note: composer version 2 introduced a new structure for the installed.json file, so we support both type installedJSONComposerV2 struct { - Packages []parsedData `json:"packages"` + Packages []parsedInstalledData `json:"packages"` +} + +type parsedInstalledData struct { + License []string `json:"license"` + pkg.PhpComposerInstalledEntry } func (w *installedJSONComposerV2) UnmarshalJSON(data []byte) error { type compv2 struct { - Packages []parsedData `json:"packages"` + Packages []parsedInstalledData `json:"packages"` } compv2er := new(compv2) err := json.Unmarshal(data, &compv2er) if err != nil { // If we had an err or, we may be dealing with a composer v.1 installed.json // which should be all arrays - var packages []parsedData + var packages []parsedInstalledData err := json.Unmarshal(data, &packages) if err != nil { return err @@ -52,11 +57,12 @@ func parseInstalledJSON(_ file.Resolver, _ *generic.Environment, reader file.Loc } else if err != nil { return nil, nil, fmt.Errorf("failed to parse installed.json file: %w", err) } - for _, pkgMeta := range lock.Packages { + for _, pd := range lock.Packages { pkgs = append( pkgs, - newComposerLockPackage(pkgMeta, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + newComposerInstalledPackage( + pd, + reader.Location, ), ) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/cataloger.go deleted file mode 100644 index b6be1a3c..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/portage/cataloger.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Package portage provides a concrete Cataloger implementation for Gentoo Portage. 
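The composer v1/v2 note in the parse_installed_json.go hunk above comes down to a decode-then-fallback pattern: try the v2 object shape ({"packages": [...]}) first, and treat a failure as the v1 bare-array shape. A self-contained sketch of that pattern with a simplified, hypothetical entry type (not the vendored implementation):

package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

type installedJSON struct {
	Packages []entry `json:"packages"`
}

// UnmarshalJSON first tries the composer v2 form ({"packages": [...]}) and
// falls back to the v1 form, which is a bare JSON array of package objects.
func (w *installedJSON) UnmarshalJSON(data []byte) error {
	type v2 installedJSON // defined type without methods, so this does not recurse
	var obj v2
	if err := json.Unmarshal(data, &obj); err == nil {
		w.Packages = obj.Packages
		return nil
	}
	var entries []entry
	if err := json.Unmarshal(data, &entries); err != nil {
		return err
	}
	w.Packages = entries
	return nil
}

func main() {
	var v2doc, v1doc installedJSON
	_ = json.Unmarshal([]byte(`{"packages":[{"name":"vendor/a","version":"1.0.0"}]}`), &v2doc)
	_ = json.Unmarshal([]byte(`[{"name":"vendor/b","version":"2.0.0"}]`), &v1doc)
	fmt.Println(len(v2doc.Packages), len(v1doc.Packages)) // 1 1
}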
-*/ -package portage - -import ( - "github.com/anchore/syft/syft/pkg/cataloger/generic" -) - -func NewPortageCataloger() *generic.Cataloger { - return generic.NewCataloger("portage-cataloger"). - WithParserByGlobs(parsePortageContents, "**/var/db/pkg/*/*/CONTENTS") -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go index af5f0bd3..82dfacc5 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go @@ -1,3 +1,6 @@ +/* +Package python provides a concrete Cataloger implementation relating to packages within the Python language ecosystem. +*/ package python import ( @@ -16,19 +19,19 @@ func DefaultCatalogerConfig() CatalogerConfig { } } -// NewPythonIndexCataloger returns a new cataloger for python packages referenced from poetry lock files, requirements.txt files, and setup.py files. -func NewPythonIndexCataloger(cfg CatalogerConfig) *generic.Cataloger { +// NewPackageCataloger returns a new cataloger for python packages referenced from poetry lock files, requirements.txt files, and setup.py files. +func NewPackageCataloger(cfg CatalogerConfig) *generic.Cataloger { rqp := newRequirementsParser(cfg) - return generic.NewCataloger("python-index-cataloger"). + return generic.NewCataloger("python-package-cataloger"). WithParserByGlobs(rqp.parseRequirementsTxt, "**/*requirements*.txt"). WithParserByGlobs(parsePoetryLock, "**/poetry.lock"). WithParserByGlobs(parsePipfileLock, "**/Pipfile.lock"). WithParserByGlobs(parseSetup, "**/setup.py") } -// NewPythonPackageCataloger returns a new cataloger for python packages within egg or wheel installation directories. -func NewPythonPackageCataloger() *generic.Cataloger { - return generic.NewCataloger("python-package-cataloger"). +// NewInstalledPackageCataloger returns a new cataloger for python packages within egg or wheel installation directories. +func NewInstalledPackageCataloger() *generic.Cataloger { + return generic.NewCataloger("python-installed-package-cataloger"). 
WithParserByGlobs( parseWheelOrEgg, eggInfoGlob, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/package.go index e20f8786..59fd9f6b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/package.go @@ -23,16 +23,15 @@ func newPackageForIndex(name, version string, locations ...file.Location) pkg.Pa return p } -func newPackageForIndexWithMetadata(name, version string, metadata pkg.PythonPipfileLockMetadata, locations ...file.Location) pkg.Package { +func newPackageForIndexWithMetadata(name, version string, metadata pkg.PythonPipfileLockEntry, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(name, version, nil), - Language: pkg.Python, - Type: pkg.PythonPkg, - MetadataType: pkg.PythonPipfileLockMetadataType, - Metadata: metadata, + Name: name, + Version: version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(name, version, nil), + Language: pkg.Python, + Type: pkg.PythonPkg, + Metadata: metadata, } p.SetID() @@ -40,16 +39,15 @@ func newPackageForIndexWithMetadata(name, version string, metadata pkg.PythonPip return p } -func newPackageForRequirementsWithMetadata(name, version string, metadata pkg.PythonRequirementsMetadata, locations ...file.Location) pkg.Package { +func newPackageForRequirementsWithMetadata(name, version string, metadata pkg.PythonRequirementsEntry, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: name, - Version: version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(name, version, nil), - Language: pkg.Python, - Type: pkg.PythonPkg, - MetadataType: pkg.PythonRequirementsMetadataType, - Metadata: metadata, + Name: name, + Version: version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(name, version, nil), + Language: pkg.Python, + Type: pkg.PythonPkg, + Metadata: metadata, } p.SetID() @@ -59,15 +57,14 @@ func newPackageForRequirementsWithMetadata(name, version string, metadata pkg.Py func newPackageForPackage(m parsedData, sources ...file.Location) pkg.Package { p := pkg.Package{ - Name: m.Name, - Version: m.Version, - PURL: packageURL(m.Name, m.Version, &m.PythonPackageMetadata), - Locations: file.NewLocationSet(sources...), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(m.LicenseLocation, m.Licenses)...), - Language: pkg.Python, - Type: pkg.PythonPkg, - MetadataType: pkg.PythonPackageMetadataType, - Metadata: m.PythonPackageMetadata, + Name: m.Name, + Version: m.Version, + PURL: packageURL(m.Name, m.Version, &m.PythonPackage), + Locations: file.NewLocationSet(sources...), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(m.LicenseLocation, m.Licenses)...), + Language: pkg.Python, + Type: pkg.PythonPkg, + Metadata: m.PythonPackage, } p.SetID() @@ -75,7 +72,7 @@ func newPackageForPackage(m parsedData, sources ...file.Location) pkg.Package { return p } -func packageURL(name, version string, m *pkg.PythonPackageMetadata) string { +func packageURL(name, version string, m *pkg.PythonPackage) string { // generate a purl from the package data pURL := packageurl.NewPackageURL( packageurl.TypePyPi, @@ -88,7 +85,7 @@ func packageURL(name, version string, m *pkg.PythonPackageMetadata) string { return pURL.ToString() } -func purlQualifiersForPackage(m *pkg.PythonPackageMetadata) 
packageurl.Qualifiers { +func purlQualifiersForPackage(m *pkg.PythonPackage) packageurl.Qualifiers { q := packageurl.Qualifiers{} if m == nil { return q diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_pipfile_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_pipfile_lock.go index 77c8cd4f..8a81f70f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_pipfile_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_pipfile_lock.go @@ -65,7 +65,7 @@ func parsePipfileLock(_ file.Resolver, _ *generic.Environment, reader file.Locat index = "https://pypi.org/simple" } version := strings.TrimPrefix(pkgMeta.Version, "==") - pkgs = append(pkgs, newPackageForIndexWithMetadata(name, version, pkg.PythonPipfileLockMetadata{Index: index, Hashes: pkgMeta.Hashes}, reader.Location)) + pkgs = append(pkgs, newPackageForIndexWithMetadata(name, version, pkg.PythonPipfileLockEntry{Index: index, Hashes: pkgMeta.Hashes}, reader.Location)) } } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go index ac310b10..3e2c0a20 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go @@ -141,7 +141,7 @@ func (rp requirementsParser) parseRequirementsTxt(_ file.Resolver, _ *generic.En newPackageForRequirementsWithMetadata( name, version, - pkg.PythonRequirementsMetadata{ + pkg.PythonRequirementsEntry{ Name: name, Extras: parseExtras(req.Name), VersionConstraint: req.VersionConstraint, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg.go index f3fc20ea..e98fe9de 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg.go @@ -15,7 +15,8 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -// parseWheelOrEgg takes the primary metadata file reference and returns the python package it represents. +// parseWheelOrEgg takes the primary metadata file reference and returns the python package it represents. Contained +// fields are governed by the PyPA core metadata specification (https://packaging.python.org/en/latest/specifications/core-metadata/). 
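For reference alongside the PyPA core-metadata comment above, this is roughly the kind of METADATA document parseWheelOrEgg works from. The package and its values are hypothetical and only a few of the spec's fields are shown; Name, Version, Platform, and Author-email are among the fields mapped into pkg.PythonPackage, while License is captured separately for the license set (see the parsedData change further down):

package main

import "fmt"

// sampleWheelMetadata is a hypothetical wheel/egg METADATA payload following
// the PyPA core metadata field names referenced above.
const sampleWheelMetadata = `Metadata-Version: 2.1
Name: example-package
Version: 1.0.0
License: MIT
Platform: any
Author-email: maintainer@example.com
`

func main() {
	fmt.Print(sampleWheelMetadata)
}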
func parseWheelOrEgg(resolver file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { pd, sources, err := assembleEggOrWheelMetadata(resolver, reader.Location) if err != nil { @@ -111,7 +112,7 @@ func fetchTopLevelPackages(resolver file.Resolver, metadataLocation file.Locatio if err != nil { return nil, nil, err } - defer internal.CloseAndLogError(topLevelContents, topLevelLocation.VirtualPath) + defer internal.CloseAndLogError(topLevelContents, topLevelLocation.AccessPath) scanner := bufio.NewScanner(topLevelContents) for scanner.Scan() { @@ -125,6 +126,27 @@ func fetchTopLevelPackages(resolver file.Resolver, metadataLocation file.Locatio return pkgs, sources, nil } +type directURLOrigin struct { + URL string `json:"url"` + VCSInfo vcsInfo `json:"vcs_info"` + ArchiveInfo archiveInfo `json:"archive_info"` + DirInfo dirInfo `json:"dir_info"` +} + +type dirInfo struct { + Editable bool `json:"editable"` +} + +type archiveInfo struct { + Hash string `json:"hash"` +} + +type vcsInfo struct { + CommitID string `json:"commit_id"` + VCS string `json:"vcs"` + RequestedRevision string `json:"requested_revision"` +} + func fetchDirectURLData(resolver file.Resolver, metadataLocation file.Location) (d *pkg.PythonDirectURLOriginInfo, sources []file.Location, err error) { parentDir := filepath.Dir(metadataLocation.RealPath) directURLPath := filepath.Join(parentDir, "direct_url.json") @@ -140,14 +162,14 @@ func fetchDirectURLData(resolver file.Resolver, metadataLocation file.Location) if err != nil { return nil, nil, err } - defer internal.CloseAndLogError(directURLContents, directURLLocation.VirtualPath) + defer internal.CloseAndLogError(directURLContents, directURLLocation.AccessPath) buffer, err := io.ReadAll(directURLContents) if err != nil { return nil, nil, err } - var directURLJson pkg.DirectURLOrigin + var directURLJson directURLOrigin if err := json.Unmarshal(buffer, &directURLJson); err != nil { return nil, nil, err } @@ -169,7 +191,7 @@ func assembleEggOrWheelMetadata(resolver file.Resolver, metadataLocation file.Lo if err != nil { return nil, nil, err } - defer internal.CloseAndLogError(metadataContents, metadataLocation.VirtualPath) + defer internal.CloseAndLogError(metadataContents, metadataLocation.AccessPath) pd, err := parseWheelOrEggMetadata(metadataLocation.RealPath, metadataContents) if err != nil { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg_metadata.go index e8d2cafa..a5211999 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_wheel_egg_metadata.go @@ -16,9 +16,9 @@ import ( ) type parsedData struct { - Licenses string `mapstructure:"License"` - LicenseLocation file.Location - pkg.PythonPackageMetadata `mapstructure:",squash"` + Licenses string `mapstructure:"License"` + LicenseLocation file.Location + pkg.PythonPackage `mapstructure:",squash"` } // parseWheelOrEggMetadata takes a Python Egg or Wheel (which share the same format and values for our purposes), diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/cataloger.go index 8cb4774a..9b3cb094 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/cataloger.go @@ 
-1,13 +1,14 @@ +/* +Package r provides a concrete Cataloger implementation relating to packages within the R language ecosystem. +*/ package r import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "r-package-cataloger" - // NewPackageCataloger returns a new R cataloger object based on detection of R package DESCRIPTION files. func NewPackageCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("r-package-cataloger"). WithParserByGlobs(parseDescriptionFile, "**/DESCRIPTION") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/package.go index 9fc45d3e..e5ca3c48 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/package.go @@ -17,15 +17,14 @@ func newPackage(pd parseData, locations ...file.Location) pkg.Package { licenses := parseLicenseData(pd.License) result := pkg.Package{ - Name: pd.Package, - Version: pd.Version, - Locations: locationSet, - Licenses: pkg.NewLicenseSet(licenses...), - Language: pkg.R, - Type: pkg.Rpkg, - PURL: packageURL(pd), - MetadataType: pkg.RDescriptionFileMetadataType, - Metadata: pd.RDescriptionFileMetadata, + Name: pd.Package, + Version: pd.Version, + Locations: locationSet, + Licenses: pkg.NewLicenseSet(licenses...), + Language: pkg.R, + Type: pkg.Rpkg, + PURL: packageURL(pd), + Metadata: pd.RDescription, } result.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/parse_description.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/parse_description.go index 182cd4bd..cca13506 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/parse_description.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/r/parse_description.go @@ -42,7 +42,7 @@ type parseData struct { Package string Version string License string - pkg.RDescriptionFileMetadata + pkg.RDescription } func parseDataFromDescriptionMap(values map[string]string) parseData { @@ -50,7 +50,7 @@ func parseDataFromDescriptionMap(values map[string]string) parseData { License: values["License"], Package: values["Package"], Version: values["Version"], - RDescriptionFileMetadata: pkg.RDescriptionFileMetadata{ + RDescription: pkg.RDescription{ Title: values["Title"], Description: cleanMultiLineValue(values["Description"]), Maintainer: values["Maintainer"], diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/cataloger.go new file mode 100644 index 00000000..6279ac54 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/cataloger.go @@ -0,0 +1,35 @@ +/* +Package redhat provides a concrete DBCataloger implementation relating to packages within the RedHat linux distribution. +*/ +package redhat + +import ( + "database/sql" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +// NewDBCataloger returns a new RPM DB cataloger object. +func NewDBCataloger() *generic.Cataloger { + // check if a sqlite driver is available + if !isSqliteDriverAvailable() { + log.Warnf("sqlite driver is not available, newer RPM databases might not be cataloged") + } + + return generic.NewCataloger("rpm-db-cataloger"). + WithParserByGlobs(parseRpmDB, pkg.RpmDBGlob). 
+ WithParserByGlobs(parseRpmManifest, pkg.RpmManifestGlob) +} + +// NewArchiveCataloger returns a new RPM file cataloger object. +func NewArchiveCataloger() *generic.Cataloger { + return generic.NewCataloger("rpm-archive-cataloger"). + WithParserByGlobs(parseRpmArchive, "**/*.rpm") +} + +func isSqliteDriverAvailable() bool { + _, err := sql.Open("sqlite", ":memory:") + return err == nil +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/package.go new file mode 100644 index 00000000..23954991 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/package.go @@ -0,0 +1,120 @@ +package redhat + +import ( + "fmt" + "strconv" + "strings" + + "github.com/anchore/packageurl-go" + "github.com/anchore/syft/syft/file" + "github.com/anchore/syft/syft/linux" + "github.com/anchore/syft/syft/pkg" +) + +func newDBPackage(dbOrRpmLocation file.Location, m pkg.RpmDBEntry, distro *linux.Release, licenses []string) pkg.Package { + p := pkg.Package{ + Name: m.Name, + Version: toELVersion(m.Epoch, m.Version, m.Release), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(dbOrRpmLocation, licenses...)...), + PURL: packageURL(m.Name, m.Arch, m.Epoch, m.SourceRpm, m.Version, m.Release, distro), + Locations: file.NewLocationSet(dbOrRpmLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Type: pkg.RpmPkg, + Metadata: m, + } + + p.SetID() + return p +} + +func newArchivePackage(archiveLocation file.Location, m pkg.RpmArchive, licenses []string) pkg.Package { + p := pkg.Package{ + Name: m.Name, + Version: toELVersion(m.Epoch, m.Version, m.Release), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(archiveLocation, licenses...)...), + PURL: packageURL(m.Name, m.Arch, m.Epoch, m.SourceRpm, m.Version, m.Release, nil), + Locations: file.NewLocationSet(archiveLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Type: pkg.RpmPkg, + Metadata: m, + } + + p.SetID() + return p +} + +// newMetadataFromManifestLine parses an entry in an RPM manifest file as used in Mariner distroless containers. 
+// Each line is the output from: +// - rpm --query --all --query-format "%{NAME}\t%{VERSION}-%{RELEASE}\t%{INSTALLTIME}\t%{BUILDTIME}\t%{VENDOR}\t%{EPOCH}\t%{SIZE}\t%{ARCH}\t%{EPOCHNUM}\t%{SOURCERPM}\n" +// - https://github.com/microsoft/CBL-Mariner/blob/3df18fac373aba13a54bd02466e64969574f13af/toolkit/docs/how_it_works/5_misc.md?plain=1#L150 +func newMetadataFromManifestLine(entry string) (*pkg.RpmDBEntry, error) { + parts := strings.Split(entry, "\t") + if len(parts) < 10 { + return nil, fmt.Errorf("unexpected number of fields in line: %s", entry) + } + + versionParts := strings.Split(parts[1], "-") + if len(versionParts) != 2 { + return nil, fmt.Errorf("unexpected version field: %s", parts[1]) + } + version := versionParts[0] + release := versionParts[1] + + converted, err := strconv.Atoi(parts[8]) + var epoch *int + if err != nil || parts[5] == "(none)" { + epoch = nil + } else { + epoch = &converted + } + + converted, err = strconv.Atoi(parts[6]) + var size int + if err == nil { + size = converted + } + return &pkg.RpmDBEntry{ + Name: parts[0], + Version: version, + Epoch: epoch, + Arch: parts[7], + Release: release, + SourceRpm: parts[9], + Vendor: parts[4], + Size: size, + }, nil +} + +// packageURL returns the PURL for the specific RHEL package (see https://github.com/package-url/purl-spec) +func packageURL(name, arch string, epoch *int, srpm string, version, release string, distro *linux.Release) string { + var namespace string + if distro != nil { + namespace = distro.ID + } + + qualifiers := map[string]string{} + + if arch != "" { + qualifiers[pkg.PURLQualifierArch] = arch + } + + if epoch != nil { + qualifiers[pkg.PURLQualifierEpoch] = strconv.Itoa(*epoch) + } + + if srpm != "" { + qualifiers[pkg.PURLQualifierUpstream] = srpm + } + + return packageurl.NewPackageURL( + packageurl.TypeRPM, + namespace, + name, + // for purl the epoch is a qualifier, not part of the version + // see https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst under the RPM section + fmt.Sprintf("%s-%s", version, release), + pkg.PURLQualifiers( + qualifiers, + distro, + ), + "", + ).ToString() +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_archive.go similarity index 69% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_archive.go index 06c5f614..73e3ff4b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_archive.go @@ -1,4 +1,4 @@ -package rpm +package redhat import ( "fmt" @@ -13,8 +13,8 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -// parseRpm parses a single RPM -func parseRpm(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { +// parseRpmArchive parses a single RPM +func parseRpmArchive(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { rpm, err := rpmutils.ReadRpm(reader) if err != nil { return nil, nil, fmt.Errorf("RPM file found but unable to read: %s (%w)", reader.Location.RealPath, err) @@ -32,22 +32,19 @@ func parseRpm(_ file.Resolver, _ *generic.Environment, reader file.LocationReadC size, _ := rpm.Header.InstalledSize() files, _ := rpm.Header.GetFiles() - pd := parsedData{ - Licenses: 
pkg.NewLicensesFromLocation(reader.Location, licenses...), - RpmMetadata: pkg.RpmMetadata{ - Name: nevra.Name, - Version: nevra.Version, - Epoch: parseEpoch(nevra.Epoch), - Arch: nevra.Arch, - Release: nevra.Release, - SourceRpm: sourceRpm, - Vendor: vendor, - Size: int(size), - Files: mapFiles(files, digestAlgorithm), - }, + metadata := pkg.RpmArchive{ + Name: nevra.Name, + Version: nevra.Version, + Epoch: parseEpoch(nevra.Epoch), + Arch: nevra.Arch, + Release: nevra.Release, + SourceRpm: sourceRpm, + Vendor: vendor, + Size: int(size), + Files: mapFiles(files, digestAlgorithm), } - return []pkg.Package{newPackage(reader.Location, pd, nil)}, nil, nil + return []pkg.Package{newArchivePackage(reader.Location, metadata, licenses)}, nil, nil } func getDigestAlgorithm(header *rpmutils.RpmHeader) string { @@ -63,8 +60,8 @@ func getDigestAlgorithm(header *rpmutils.RpmHeader) string { return "" } -func mapFiles(files []rpmutils.FileInfo, digestAlgorithm string) []pkg.RpmdbFileRecord { - var out []pkg.RpmdbFileRecord +func mapFiles(files []rpmutils.FileInfo, digestAlgorithm string) []pkg.RpmFileRecord { + var out []pkg.RpmFileRecord for _, f := range files { digest := file.Digest{} if f.Digest() != "" { @@ -73,9 +70,9 @@ func mapFiles(files []rpmutils.FileInfo, digestAlgorithm string) []pkg.RpmdbFile Value: f.Digest(), } } - out = append(out, pkg.RpmdbFileRecord{ + out = append(out, pkg.RpmFileRecord{ Path: f.Name(), - Mode: pkg.RpmdbFileMode(f.Mode()), + Mode: pkg.RpmFileMode(f.Mode()), Size: int(f.Size()), Digest: digest, UserName: f.UserName(), diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_db.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_db.go similarity index 76% rename from vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_db.go rename to vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_db.go index 1dee8c0f..672b1c75 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_db.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_db.go @@ -1,4 +1,4 @@ -package rpm +package redhat import ( "fmt" @@ -56,10 +56,24 @@ func parseRpmDB(resolver file.Resolver, env *generic.Environment, reader file.Lo continue } - p := newPackage( + metadata := pkg.RpmDBEntry{ + Name: entry.Name, + Version: entry.Version, + Epoch: entry.Epoch, + Arch: entry.Arch, + Release: entry.Release, + SourceRpm: entry.SourceRpm, + Vendor: entry.Vendor, + Size: entry.Size, + ModularityLabel: entry.Modularitylabel, + Files: extractRpmFileRecords(resolver, *entry), + } + + p := newDBPackage( reader.Location, - newParsedDataFromEntry(reader.Location, *entry, extractRpmdbFileRecords(resolver, *entry)), + metadata, distro, + []string{entry.License}, ) if !pkg.IsValid(&p) { @@ -81,15 +95,15 @@ func parseRpmDB(resolver file.Resolver, env *generic.Environment, reader file.Lo // version string, containing epoch (optional), version, and release information. Epoch is an optional field and can be // assumed to be 0 when not provided for comparison purposes, however, if the underlying RPM DB entry does not have // an epoch specified it would be slightly disingenuous to display a value of 0. 
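To make the epoch handling described in the comment above concrete, here is a small sketch of the refactored toELVersion(epoch, version, release) helper that the new redhat package constructors call; the body mirrors the hunk that follows and the values are hypothetical:

package main

import "fmt"

// toELVersionSketch mirrors the refactored toELVersion(epoch, version, release):
// the epoch prefix is only rendered when the RPM entry actually carries one.
func toELVersionSketch(epoch *int, version, release string) string {
	if epoch != nil {
		return fmt.Sprintf("%d:%s-%s", *epoch, version, release)
	}
	return fmt.Sprintf("%s-%s", version, release)
}

func main() {
	fmt.Println(toELVersionSketch(nil, "1.1.1k", "21.el8")) // 1.1.1k-21.el8
	one := 1
	fmt.Println(toELVersionSketch(&one, "1.1.1k", "21.el8")) // 1:1.1.1k-21.el8
}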
-func toELVersion(metadata pkg.RpmMetadata) string { - if metadata.Epoch != nil { - return fmt.Sprintf("%d:%s-%s", *metadata.Epoch, metadata.Version, metadata.Release) +func toELVersion(epoch *int, version, release string) string { + if epoch != nil { + return fmt.Sprintf("%d:%s-%s", *epoch, version, release) } - return fmt.Sprintf("%s-%s", metadata.Version, metadata.Release) + return fmt.Sprintf("%s-%s", version, release) } -func extractRpmdbFileRecords(resolver file.PathResolver, entry rpmdb.PackageInfo) []pkg.RpmdbFileRecord { - var records = make([]pkg.RpmdbFileRecord, 0) +func extractRpmFileRecords(resolver file.PathResolver, entry rpmdb.PackageInfo) []pkg.RpmFileRecord { + var records = make([]pkg.RpmFileRecord, 0) files, err := entry.InstalledFiles() if err != nil { @@ -100,9 +114,9 @@ func extractRpmdbFileRecords(resolver file.PathResolver, entry rpmdb.PackageInfo for _, record := range files { // only persist RPMDB file records which exist in the image/directory, otherwise ignore them if resolver.HasPath(record.Path) { - records = append(records, pkg.RpmdbFileRecord{ + records = append(records, pkg.RpmFileRecord{ Path: record.Path, - Mode: pkg.RpmdbFileMode(record.Mode), + Mode: pkg.RpmFileMode(record.Mode), Size: int(record.Size), Digest: file.Digest{ Value: record.Digest, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_manifest.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_manifest.go new file mode 100644 index 00000000..b3b06bdc --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/redhat/parse_rpm_manifest.go @@ -0,0 +1,56 @@ +package redhat + +import ( + "bufio" + "errors" + "io" + "strings" + + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/artifact" + "github.com/anchore/syft/syft/file" + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +// Parses an RPM manifest file, as used in Mariner distroless containers, and returns the Packages listed +func parseRpmManifest(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + r := bufio.NewReader(reader) + allPkgs := make([]pkg.Package, 0) + + for { + line, err := r.ReadString('\n') + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, nil, err + } + + if line == "" { + continue + } + + metadata, err := newMetadataFromManifestLine(strings.TrimSuffix(line, "\n")) + if err != nil { + log.Warnf("unable to parse RPM manifest entry: %+v", err) + continue + } + + if metadata == nil { + log.Warn("unable to parse RPM manifest entry: no metadata found") + continue + } + + p := newDBPackage(reader.Location, *metadata, nil, nil) + + if !pkg.IsValid(&p) { + continue + } + + p.SetID() + allPkgs = append(allPkgs, p) + } + + return allPkgs, nil, nil +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go deleted file mode 100644 index 75a0c492..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Package rpm provides a concrete DBCataloger implementation for RPM "Package" DB files and a FileCataloger for RPM files. 
-*/ -package rpm - -import ( - "database/sql" - - "github.com/anchore/syft/internal/log" - "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/pkg/cataloger/generic" -) - -const ( - dbCatalogerName = "rpm-db-cataloger" - fileCatalogerName = "rpm-file-cataloger" -) - -// NewRpmDBCataloger returns a new RPM DB cataloger object. -func NewRpmDBCataloger() *generic.Cataloger { - // check if a sqlite driver is available - if !isSqliteDriverAvailable() { - log.Warnf("sqlite driver is not available, newer RPM databases might not be cataloged") - } - - return generic.NewCataloger(dbCatalogerName). - WithParserByGlobs(parseRpmDB, pkg.RpmDBGlob). - WithParserByGlobs(parseRpmManifest, pkg.RpmManifestGlob) -} - -// NewFileCataloger returns a new RPM file cataloger object. -func NewFileCataloger() *generic.Cataloger { - return generic.NewCataloger(fileCatalogerName). - WithParserByGlobs(parseRpm, "**/*.rpm") -} - -func isSqliteDriverAvailable() bool { - _, err := sql.Open("sqlite", ":memory:") - return err == nil -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/package.go deleted file mode 100644 index 136af9f5..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/package.go +++ /dev/null @@ -1,127 +0,0 @@ -package rpm - -import ( - "fmt" - "strconv" - "strings" - - rpmdb "github.com/knqyf263/go-rpmdb/pkg" - - "github.com/anchore/packageurl-go" - "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/linux" - "github.com/anchore/syft/syft/pkg" -) - -func newPackage(dbOrRpmLocation file.Location, pd parsedData, distro *linux.Release) pkg.Package { - p := pkg.Package{ - Name: pd.Name, - Version: toELVersion(pd.RpmMetadata), - Licenses: pkg.NewLicenseSet(pd.Licenses...), - PURL: packageURL(pd.RpmMetadata, distro), - Locations: file.NewLocationSet(dbOrRpmLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - Type: pkg.RpmPkg, - MetadataType: pkg.RpmMetadataType, - Metadata: pd.RpmMetadata, - } - - p.SetID() - return p -} - -type parsedData struct { - Licenses []pkg.License - pkg.RpmMetadata -} - -func newParsedDataFromEntry(licenseLocation file.Location, entry rpmdb.PackageInfo, files []pkg.RpmdbFileRecord) parsedData { - return parsedData{ - Licenses: pkg.NewLicensesFromLocation(licenseLocation, entry.License), - RpmMetadata: pkg.RpmMetadata{ - Name: entry.Name, - Version: entry.Version, - Epoch: entry.Epoch, - Arch: entry.Arch, - Release: entry.Release, - SourceRpm: entry.SourceRpm, - Vendor: entry.Vendor, - Size: entry.Size, - ModularityLabel: entry.Modularitylabel, - Files: files, - }, - } -} - -func newMetadataFromManifestLine(entry string) (*parsedData, error) { - parts := strings.Split(entry, "\t") - if len(parts) < 10 { - return nil, fmt.Errorf("unexpected number of fields in line: %s", entry) - } - - versionParts := strings.Split(parts[1], "-") - if len(versionParts) != 2 { - return nil, fmt.Errorf("unexpected version field: %s", parts[1]) - } - version := versionParts[0] - release := versionParts[1] - - converted, err := strconv.Atoi(parts[8]) - var epoch *int - if err != nil || parts[5] == "(none)" { - epoch = nil - } else { - epoch = &converted - } - - converted, err = strconv.Atoi(parts[6]) - var size int - if err == nil { - size = converted - } - return &parsedData{ - RpmMetadata: pkg.RpmMetadata{ - Name: parts[0], - Version: version, - Epoch: epoch, - Arch: parts[7], - Release: release, - SourceRpm: parts[9], - Vendor: parts[4], - 
Size: size, - }, - }, nil -} - -// packageURL returns the PURL for the specific RHEL package (see https://github.com/package-url/purl-spec) -func packageURL(m pkg.RpmMetadata, distro *linux.Release) string { - var namespace string - if distro != nil { - namespace = distro.ID - } - - qualifiers := map[string]string{ - pkg.PURLQualifierArch: m.Arch, - } - - if m.Epoch != nil { - qualifiers[pkg.PURLQualifierEpoch] = strconv.Itoa(*m.Epoch) - } - - if m.SourceRpm != "" { - qualifiers[pkg.PURLQualifierUpstream] = m.SourceRpm - } - - return packageurl.NewPackageURL( - packageurl.TypeRPM, - namespace, - m.Name, - // for purl the epoch is a qualifier, not part of the version - // see https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst under the RPM section - fmt.Sprintf("%s-%s", m.Version, m.Release), - pkg.PURLQualifiers( - qualifiers, - distro, - ), - "", - ).ToString() -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_manifest.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_manifest.go deleted file mode 100644 index c8110d6d..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/parse_rpm_manifest.go +++ /dev/null @@ -1,68 +0,0 @@ -package rpm - -import ( - "bufio" - "errors" - "io" - "strings" - - "github.com/anchore/syft/internal/log" - "github.com/anchore/syft/syft/artifact" - "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/pkg" - "github.com/anchore/syft/syft/pkg/cataloger/generic" -) - -// Parses an RPM manifest file, as used in Mariner distroless containers, and returns the Packages listed -func parseRpmManifest(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { - r := bufio.NewReader(reader) - allPkgs := make([]pkg.Package, 0) - - for { - line, err := r.ReadString('\n') - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return nil, nil, err - } - - if line == "" { - continue - } - - p, err := parseRpmManifestEntry(strings.TrimSuffix(line, "\n"), reader.Location) - if err != nil { - log.Warnf("unable to parse RPM manifest entry: %w", err) - continue - } - - if !pkg.IsValid(p) { - continue - } - - p.SetID() - allPkgs = append(allPkgs, *p) - } - - return allPkgs, nil, nil -} - -// Parses an entry in an RPM manifest file as used in Mariner distroless containers -// Each line is the output of : -// rpm --query --all --query-format "%{NAME}\t%{VERSION}-%{RELEASE}\t%{INSTALLTIME}\t%{BUILDTIME}\t%{VENDOR}\t%{EPOCH}\t%{SIZE}\t%{ARCH}\t%{EPOCHNUM}\t%{SOURCERPM}\n" -// https://github.com/microsoft/CBL-Mariner/blob/3df18fac373aba13a54bd02466e64969574f13af/toolkit/docs/how_it_works/5_misc.md?plain=1#L150 -func parseRpmManifestEntry(entry string, location file.Location) (*pkg.Package, error) { - metadata, err := newMetadataFromManifestLine(entry) - if err != nil { - return nil, err - } - - if metadata == nil { - return nil, nil - } - - p := newPackage(location, *metadata, nil) - - return &p, nil -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/catalogers.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/catalogers.go index e3e173a2..0c8faf8f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/catalogers.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/catalogers.go @@ -1,5 +1,5 @@ /* -Package ruby bundler provides a concrete Cataloger implementation for Ruby Gemfile.lock bundler files. 
+Package ruby provides a concrete Cataloger implementation relating to packages within the Ruby language ecosystem. */ package ruby @@ -13,8 +13,14 @@ func NewGemFileLockCataloger() *generic.Cataloger { WithParserByGlobs(parseGemFileLockEntries, "**/Gemfile.lock") } -// NewGemSpecCataloger returns a new Bundler cataloger object tailored for detecting installations of gems (e.g. Gemspec). +// NewInstalledGemSpecCataloger returns a new Bundler cataloger object tailored for detecting installations of gems (e.g. Gemspec). +func NewInstalledGemSpecCataloger() *generic.Cataloger { + return generic.NewCataloger("ruby-installed-gemspec-cataloger"). + WithParserByGlobs(parseGemSpecEntries, "**/specifications/**/*.gemspec") +} + +// NewGemSpecCataloger looks for gems without the additional requirement of the gem being installed. func NewGemSpecCataloger() *generic.Cataloger { return generic.NewCataloger("ruby-gemspec-cataloger"). - WithParserByGlobs(parseGemSpecEntries, "**/specifications/**/*.gemspec") + WithParserByGlobs(parseGemSpecEntries, "**/*.gemspec") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/package.go index 86075274..349fd594 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/package.go @@ -23,15 +23,14 @@ func newGemfileLockPackage(name, version string, locations ...file.Location) pkg func newGemspecPackage(m gemData, gemSpecLocation file.Location) pkg.Package { p := pkg.Package{ - Name: m.Name, - Version: m.Version, - Locations: file.NewLocationSet(gemSpecLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), - Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(gemSpecLocation, m.Licenses...)...), - PURL: packageURL(m.Name, m.Version), - Language: pkg.Ruby, - Type: pkg.GemPkg, - MetadataType: pkg.GemMetadataType, - Metadata: m.GemMetadata, + Name: m.Name, + Version: m.Version, + Locations: file.NewLocationSet(gemSpecLocation.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation)), + Licenses: pkg.NewLicenseSet(pkg.NewLicensesFromLocation(gemSpecLocation, m.Licenses...)...), + PURL: packageURL(m.Name, m.Version), + Language: pkg.Ruby, + Type: pkg.GemPkg, + Metadata: m.RubyGemspec, } p.SetID() diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/parse_gemspec.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/parse_gemspec.go index 97c2876b..59874df1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/parse_gemspec.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/ruby/parse_gemspec.go @@ -22,7 +22,7 @@ type postProcessor func(string) []string type gemData struct { Licenses []string `mapstructure:"licenses" json:"licenses,omitempty"` - pkg.GemMetadata `mapstructure:",squash" json:",inline"` + pkg.RubyGemspec `mapstructure:",squash" json:",inline"` } // match example: Al\u003Ex ---> 003E @@ -64,6 +64,7 @@ func processList(s string) []string { return results } +// parseGemFileLockEntries parses the gemfile.lock file and returns the packages and relationships found. 
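The split between the two ruby gemspec catalogers above comes down to their globs: the installed variant only looks beneath a specifications directory, while the plain gemspec cataloger accepts any *.gemspec path. A quick sketch of what each glob matches, using hypothetical paths and the bmatcuk/doublestar/v4 module already vendored in this change:

package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	installedGlob := "**/specifications/**/*.gemspec" // ruby-installed-gemspec-cataloger
	anyGlob := "**/*.gemspec"                         // ruby-gemspec-cataloger

	paths := []string{
		"usr/local/bundle/specifications/example-1.0.0.gemspec", // hypothetical installed gem
		"src/example/example.gemspec",                            // hypothetical source checkout
	}
	for _, p := range paths {
		installedMatch, _ := doublestar.Match(installedGlob, p)
		anyMatch, _ := doublestar.Match(anyGlob, p)
		fmt.Println(p, "installed:", installedMatch, "any:", anyMatch)
	}
	// the specifications/ path matches both globs; the source checkout only matches anyGlob
}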
func parseGemSpecEntries(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { var pkgs []pkg.Package var fields = make(map[string]interface{}) diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/cataloger.go index a5128b24..51cdeab8 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/cataloger.go @@ -1,5 +1,5 @@ /* -Package rust provides a concrete Cataloger implementation for Cargo.lock files. +Package rust provides a concrete Cataloger implementation relating to packages within the Rust language ecosystem. */ package rust diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/package.go index 8787c515..93d1eeae 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/package.go @@ -9,16 +9,15 @@ import ( ) // Pkg returns the standard `pkg.Package` representation of the package referenced within the Cargo.lock metadata. -func newPackageFromCargoMetadata(m pkg.CargoPackageMetadata, locations ...file.Location) pkg.Package { +func newPackageFromCargoMetadata(m pkg.RustCargoLockEntry, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: m.Name, - Version: m.Version, - Locations: file.NewLocationSet(locations...), - PURL: packageURL(m.Name, m.Version), - Language: pkg.Rust, - Type: pkg.RustPkg, - MetadataType: pkg.RustCargoPackageMetadataType, - Metadata: m, + Name: m.Name, + Version: m.Version, + Locations: file.NewLocationSet(locations...), + PURL: packageURL(m.Name, m.Version), + Language: pkg.Rust, + Type: pkg.RustPkg, + Metadata: m, } p.SetID() @@ -42,14 +41,13 @@ func newPackagesFromAudit(location file.Location, versionInfo rustaudit.VersionI func newPackageFromAudit(dep *rustaudit.Package, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: dep.Name, - Version: dep.Version, - PURL: packageURL(dep.Name, dep.Version), - Language: pkg.Rust, - Type: pkg.RustPkg, - Locations: file.NewLocationSet(locations...), - MetadataType: pkg.RustCargoPackageMetadataType, - Metadata: pkg.CargoPackageMetadata{ + Name: dep.Name, + Version: dep.Version, + PURL: packageURL(dep.Name, dep.Version), + Language: pkg.Rust, + Type: pkg.RustPkg, + Locations: file.NewLocationSet(locations...), + Metadata: pkg.RustBinaryAuditEntry{ Name: dep.Name, Version: dep.Version, Source: dep.Source, diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/parse_cargo_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/parse_cargo_lock.go index cd001728..faab56f2 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/parse_cargo_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rust/parse_cargo_lock.go @@ -14,7 +14,7 @@ import ( var _ generic.Parser = parseCargoLock type cargoLockFile struct { - Packages []pkg.CargoPackageMetadata `toml:"package"` + Packages []pkg.RustCargoLockEntry `toml:"package"` } // parseCargoLock is a parser function for Cargo.lock contents, returning all rust cargo crates discovered. 
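For reference against the cargoLockFile change above, this is roughly what a single [[package]] block in a Cargo.lock file looks like; the crate below is hypothetical and only the commonly present keys are shown:

package main

import "fmt"

// sampleCargoLock is an illustrative Cargo.lock fragment; the crate and its
// checksum are hypothetical. Each [[package]] block decodes into one
// pkg.RustCargoLockEntry via the cargoLockFile struct shown above.
const sampleCargoLock = `[[package]]
name = "example-crate"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0000000000000000000000000000000000000000000000000000000000000000"
`

func main() {
	fmt.Print(sampleCargoLock)
}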
diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/sbom/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/sbom/cataloger.go index a08c2c2a..08c60145 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/sbom/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/sbom/cataloger.go @@ -1,18 +1,25 @@ +/* +Package sbom provides a concrete Cataloger implementation for capturing packages embedded within SBOM files. +*/ package sbom import ( + "bytes" + "fmt" + "io" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" - "github.com/anchore/syft/syft/formats" + "github.com/anchore/syft/syft/format" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/pkg/cataloger/generic" ) const catalogerName = "sbom-cataloger" -// NewSBOMCataloger returns a new SBOM cataloger object loaded from saved SBOM JSON. -func NewSBOMCataloger() *generic.Cataloger { +// NewCataloger returns a new SBOM cataloger object loaded from saved SBOM JSON. +func NewCataloger() *generic.Cataloger { return generic.NewCataloger(catalogerName). WithParserByGlobs(parseSBOM, "**/*.syft.json", @@ -30,7 +37,11 @@ func NewSBOMCataloger() *generic.Cataloger { } func parseSBOM(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { - s, _, err := formats.Decode(reader) + readSeeker, err := adaptToReadSeeker(reader) + if err != nil { + return nil, nil, fmt.Errorf("unable to read SBOM file %q: %w", reader.Location.RealPath, err) + } + s, _, _, err := format.Decode(readSeeker) if err != nil { return nil, nil, err } @@ -62,3 +73,17 @@ func parseSBOM(_ file.Resolver, _ *generic.Environment, reader file.LocationRead return pkgs, relationships, nil } + +func adaptToReadSeeker(reader io.Reader) (io.ReadSeeker, error) { + // with the stereoscope API and default file.Resolver implementation here in syft, odds are very high that + // the underlying reader is already a ReadSeeker, so we can just return it as-is. We still want to + if rs, ok := reader.(io.ReadSeeker); ok { + return rs, nil + } + + log.Debug("SBOM cataloger reader is not a ReadSeeker, reading entire SBOM into memory") + + var buff bytes.Buffer + _, err := io.Copy(&buff, reader) + return bytes.NewReader(buff.Bytes()), err +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go index a890b858..f928d3e0 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go @@ -1,5 +1,5 @@ /* -Package swift provides a concrete Cataloger implementation for Podfile.lock and Package.resolved files. +Package swift provides a concrete Cataloger implementation relating to packages within the swift language ecosystem. */ package swift @@ -8,7 +8,7 @@ import ( ) func NewSwiftPackageManagerCataloger() *generic.Cataloger { - return generic.NewCataloger("spm-cataloger"). + return generic.NewCataloger("swift-package-manager-cataloger"). 
WithParserByGlobs(parsePackageResolved, "**/Package.resolved", "**/.package.resolved") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go index c5f606cb..c4370f5d 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go @@ -10,14 +10,13 @@ import ( func newSwiftPackageManagerPackage(name, version, sourceURL, revision string, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: name, - Version: version, - PURL: swiftPackageManagerPackageURL(name, version, sourceURL), - Locations: file.NewLocationSet(locations...), - Type: pkg.SwiftPkg, - Language: pkg.Swift, - MetadataType: pkg.SwiftPackageManagerMetadataType, - Metadata: pkg.SwiftPackageManagerMetadata{ + Name: name, + Version: version, + PURL: swiftPackageManagerPackageURL(name, version, sourceURL), + Locations: file.NewLocationSet(locations...), + Type: pkg.SwiftPkg, + Language: pkg.Swift, + Metadata: pkg.SwiftPackageManagerResolvedEntry{ Revision: revision, }, } @@ -29,14 +28,13 @@ func newSwiftPackageManagerPackage(name, version, sourceURL, revision string, lo func newCocoaPodsPackage(name, version, hash string, locations ...file.Location) pkg.Package { p := pkg.Package{ - Name: name, - Version: version, - PURL: cocoaPodsPackageURL(name, version), - Locations: file.NewLocationSet(locations...), - Type: pkg.CocoapodsPkg, - Language: pkg.Swift, - MetadataType: pkg.CocoapodsMetadataType, - Metadata: pkg.CocoapodsMetadata{ + Name: name, + Version: version, + PURL: cocoaPodsPackageURL(name, version), + Locations: file.NewLocationSet(locations...), + Type: pkg.CocoapodsPkg, + Language: pkg.Swift, + Metadata: pkg.CocoaPodfileLockEntry{ Checksum: hash, }, } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go index 2fb99b89..191496a4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go @@ -84,14 +84,14 @@ func parsePackageResolved(_ file.Resolver, _ *generic.Environment, reader file.L } var pkgs []pkg.Package - for _, packagePin := range pins { + for _, pkgPin := range pins { pkgs = append( pkgs, newSwiftPackageManagerPackage( - packagePin.Identity, - packagePin.Version, - packagePin.Location, - packagePin.Revision, + pkgPin.Identity, + pkgPin.Version, + pkgPin.Location, + pkgPin.Revision, reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), ) diff --git a/vendor/github.com/anchore/syft/syft/pkg/cocoapods.go b/vendor/github.com/anchore/syft/syft/pkg/cocoapods.go new file mode 100644 index 00000000..58ce99c5 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cocoapods.go @@ -0,0 +1,6 @@ +package pkg + +// CocoaPodfileLockEntry represents a single entry from the "Pods" section of a Podfile.lock file. 
+type CocoaPodfileLockEntry struct { + Checksum string `mapstructure:"checksum" json:"checksum"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cocoapods_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/cocoapods_metadata.go deleted file mode 100644 index 6fdc9026..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/cocoapods_metadata.go +++ /dev/null @@ -1,5 +0,0 @@ -package pkg - -type CocoapodsMetadata struct { - Checksum string `mapstructure:"checksum" json:"checksum"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/conan.go b/vendor/github.com/anchore/syft/syft/pkg/conan.go new file mode 100644 index 00000000..bf2b1584 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/conan.go @@ -0,0 +1,25 @@ +package pkg + +// ConanLockEntry represents a single "node" entry from a conan.lock file. +type ConanLockEntry struct { + Ref string `json:"ref"` + PackageID string `json:"package_id,omitempty"` + Prev string `json:"prev,omitempty"` + Requires []string `json:"requires,omitempty"` + BuildRequires []string `json:"build_requires,omitempty"` + PythonRequires []string `json:"py_requires,omitempty"` + Options map[string]string `json:"options,omitempty"` + Path string `json:"path,omitempty"` + Context string `json:"context,omitempty"` +} + +// ConanfileEntry represents a single "Requires" entry from a conanfile.txt. +type ConanfileEntry struct { + Ref string `mapstructure:"ref" json:"ref"` +} + +// ConaninfoEntry represents a single "full_requires" entry from a conaninfo.txt. +type ConaninfoEntry struct { + Ref string `json:"ref"` + PackageID string `json:"package_id,omitempty"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/conan_lock_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/conan_lock_metadata.go deleted file mode 100644 index 3c20b7ca..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/conan_lock_metadata.go +++ /dev/null @@ -1,50 +0,0 @@ -package pkg - -import ( - "strings" - - "github.com/anchore/packageurl-go" - "github.com/anchore/syft/syft/linux" -) - -type ConanLockMetadata struct { - Ref string `json:"ref"` - PackageID string `json:"package_id,omitempty"` - Prev string `json:"prev,omitempty"` - Requires []string `json:"requires,omitempty"` - BuildRequires []string `json:"build_requires,omitempty"` - PythonRequires []string `json:"py_requires,omitempty"` - Options map[string]string `json:"options,omitempty"` - Path string `json:"path,omitempty"` - Context string `json:"context,omitempty"` -} - -func (m ConanLockMetadata) PackageURL(_ *linux.Release) string { - var qualifiers packageurl.Qualifiers - - name, version := m.NameAndVersion() - - return packageurl.NewPackageURL( - packageurl.TypeConan, - "", - name, - version, - qualifiers, - "", - ).ToString() -} - -// NameAndVersion returns the name and version of the package. -// If ref is not in the format of "name/version@user/channel", then an empty string is returned for both. 
-func (m ConanLockMetadata) NameAndVersion() (name, version string) { - if len(m.Ref) < 1 { - return name, version - } - - splits := strings.Split(strings.Split(m.Ref, "@")[0], "/") - if len(splits) < 2 { - return name, version - } - - return splits[0], splits[1] -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/conan_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/conan_metadata.go deleted file mode 100644 index a138c279..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/conan_metadata.go +++ /dev/null @@ -1,5 +0,0 @@ -package pkg - -type ConanMetadata struct { - Ref string `mapstructure:"ref" json:"ref"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/dart_pub_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/dart.go similarity index 62% rename from vendor/github.com/anchore/syft/syft/pkg/dart_pub_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/dart.go index 38884b9e..49674126 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/dart_pub_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/dart.go @@ -1,6 +1,7 @@ package pkg -type DartPubMetadata struct { +// DartPubspecLockEntry is a struct that represents a single entry found in the "packages" section in a Dart pubspec.lock file. +type DartPubspecLockEntry struct { Name string `mapstructure:"name" json:"name"` Version string `mapstructure:"version" json:"version"` HostedURL string `mapstructure:"hosted_url" json:"hosted_url,omitempty"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/dotnet.go b/vendor/github.com/anchore/syft/syft/pkg/dotnet.go new file mode 100644 index 00000000..4477ac01 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/dotnet.go @@ -0,0 +1,21 @@ +package pkg + +// DotnetDepsEntry is a struct that represents a single entry found in the "libraries" section in a .NET [*.]deps.json file. +type DotnetDepsEntry struct { + Name string `mapstructure:"name" json:"name"` + Version string `mapstructure:"version" json:"version"` + Path string `mapstructure:"path" json:"path"` + Sha512 string `mapstructure:"sha512" json:"sha512"` + HashPath string `mapstructure:"hashPath" json:"hashPath"` +} + +// DotnetPortableExecutableEntry is a struct that represents a single entry found within "VersionResources" section of a .NET Portable Executable binary file. 
+type DotnetPortableExecutableEntry struct { + AssemblyVersion string `json:"assemblyVersion"` + LegalCopyright string `json:"legalCopyright"` + Comments string `json:"comments,omitempty"` + InternalName string `json:"internalName,omitempty"` + CompanyName string `json:"companyName"` + ProductName string `json:"productName"` + ProductVersion string `json:"productVersion"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/dotnet_deps_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/dotnet_deps_metadata.go deleted file mode 100644 index 2b141341..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/dotnet_deps_metadata.go +++ /dev/null @@ -1,9 +0,0 @@ -package pkg - -type DotnetDepsMetadata struct { - Name string `mapstructure:"name" json:"name"` - Version string `mapstructure:"version" json:"version"` - Path string `mapstructure:"path" json:"path"` - Sha512 string `mapstructure:"sha512" json:"sha512"` - HashPath string `mapstructure:"hashPath" json:"hashPath"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go deleted file mode 100644 index 7b42d133..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go +++ /dev/null @@ -1,11 +0,0 @@ -package pkg - -type DotnetPortableExecutableMetadata struct { - AssemblyVersion string `json:"assemblyVersion"` - LegalCopyright string `json:"legalCopyright"` - Comments string `json:"comments,omitempty"` - InternalName string `json:"internalName,omitempty"` - CompanyName string `json:"companyName"` - ProductName string `json:"productName"` - ProductVersion string `json:"productVersion"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/dpkg_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/dpkg.go similarity index 93% rename from vendor/github.com/anchore/syft/syft/pkg/dpkg_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/dpkg.go index 5b38be03..c2435b97 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/dpkg_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/dpkg.go @@ -10,17 +10,16 @@ import ( const DpkgDBGlob = "**/var/lib/dpkg/{status,status.d/**}" -var _ FileOwner = (*DpkgMetadata)(nil) +var _ FileOwner = (*DpkgDBEntry)(nil) -// DpkgMetadata represents all captured data for a Debian package DB entry; available fields are described +// DpkgDBEntry represents all captured data for a Debian package DB entry; available fields are described // at http://manpages.ubuntu.com/manpages/xenial/man1/dpkg-query.1.html in the --showformat section. 
// Additional information about how these fields are used can be found at // - https://www.debian.org/doc/debian-policy/ch-controlfields.html // - https://www.debian.org/doc/debian-policy/ch-relationships.html // - https://www.debian.org/doc/debian-policy/ch-binary.html#s-virtual-pkg // - https://www.debian.org/doc/debian-policy/ch-relationships.html#s-virtual - -type DpkgMetadata struct { +type DpkgDBEntry struct { Package string `json:"package"` Source string `json:"source" cyclonedx:"source"` Version string `json:"version"` @@ -66,7 +65,7 @@ type DpkgFileRecord struct { IsConfigFile bool `json:"isConfigFile"` } -func (m DpkgMetadata) OwnedFiles() (result []string) { +func (m DpkgDBEntry) OwnedFiles() (result []string) { s := strset.New() for _, f := range m.Files { if f.Path != "" { diff --git a/vendor/github.com/anchore/syft/syft/pkg/mix_lock_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/elixir.go similarity index 68% rename from vendor/github.com/anchore/syft/syft/pkg/mix_lock_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/elixir.go index 1677aae0..ac556cd8 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/mix_lock_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/elixir.go @@ -1,6 +1,7 @@ package pkg -type MixLockMetadata struct { +// ElixirMixLockEntry is a struct that represents a single entry in a mix.lock file +type ElixirMixLockEntry struct { Name string `mapstructure:"name" json:"name"` Version string `mapstructure:"version" json:"version"` PkgHash string `mapstructure:"pkgHash" json:"pkgHash"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/rebar_lock_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/erlang.go similarity index 62% rename from vendor/github.com/anchore/syft/syft/pkg/rebar_lock_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/erlang.go index 581105cb..d74ff680 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/rebar_lock_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/erlang.go @@ -1,6 +1,7 @@ package pkg -type RebarLockMetadata struct { +// ErlangRebarLockEntry represents a single package entry from the "deps" section within an Erlang rebar.lock file. 
+type ErlangRebarLockEntry struct { Name string `mapstructure:"name" json:"name"` Version string `mapstructure:"version" json:"version"` PkgHash string `mapstructure:"pkgHash" json:"pkgHash"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/file_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/file_metadata.go deleted file mode 100644 index d3d379de..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/file_metadata.go +++ /dev/null @@ -1,7 +0,0 @@ -package pkg - -type FileMetadata struct { - Classifier string `mapstructure:"Classifier" json:"classifier"` - RealPath string `mapstructure:"RealPath" json:"realPath"` - VirtualPath string `mapstructure:"VirtualPath" json:"virtualPath"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/golang_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/golang.go similarity index 75% rename from vendor/github.com/anchore/syft/syft/pkg/golang_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/golang.go index 791b7fcc..54e18dc9 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/golang_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/golang.go @@ -1,7 +1,7 @@ package pkg -// GolangBinMetadata represents all captured data for a Golang binary -type GolangBinMetadata struct { +// GolangBinaryBuildinfoEntry represents all captured data for a Golang binary +type GolangBinaryBuildinfoEntry struct { BuildSettings map[string]string `json:"goBuildSettings,omitempty" cyclonedx:"goBuildSettings"` GoCompiledVersion string `json:"goCompiledVersion" cyclonedx:"goCompiledVersion"` Architecture string `json:"architecture" cyclonedx:"architecture"` @@ -10,7 +10,7 @@ type GolangBinMetadata struct { GoCryptoSettings []string `json:"goCryptoSettings,omitempty" cyclonedx:"goCryptoSettings"` } -// GolangModMetadata represents all captured data for a Golang source scan with go.mod/go.sum -type GolangModMetadata struct { +// GolangModuleEntry represents all captured data for a Golang source scan with go.mod/go.sum +type GolangModuleEntry struct { H1Digest string `json:"h1Digest,omitempty" cyclonedx:"h1Digest"` } diff --git a/vendor/github.com/anchore/syft/syft/pkg/hackage.go b/vendor/github.com/anchore/syft/syft/pkg/hackage.go new file mode 100644 index 00000000..50e7d7eb --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/hackage.go @@ -0,0 +1,12 @@ +package pkg + +// HackageStackYamlLockEntry represents a single entry from the "packages" section of a stack.yaml.lock file. +type HackageStackYamlLockEntry struct { + PkgHash string `mapstructure:"pkgHash" json:"pkgHash,omitempty"` + SnapshotURL string `mapstructure:"snapshotURL" json:"snapshotURL,omitempty"` +} + +// HackageStackYamlEntry represents a single entry from the "extra-deps" section of a stack.yaml file. 
+type HackageStackYamlEntry struct { + PkgHash string `mapstructure:"pkgHash" json:"pkgHash,omitempty"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/hackage_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/hackage_metadata.go deleted file mode 100644 index 3d691273..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/hackage_metadata.go +++ /dev/null @@ -1,8 +0,0 @@ -package pkg - -type HackageMetadata struct { - Name string `mapstructure:"name" json:"name"` - Version string `mapstructure:"version" json:"version"` - PkgHash string `mapstructure:"pkgHash" json:"pkgHash,omitempty"` - SnapshotURL string `mapstructure:"snapshotURL" json:"snapshotURL,omitempty"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/java.go b/vendor/github.com/anchore/syft/syft/pkg/java.go new file mode 100644 index 00000000..33c46ee9 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/java.go @@ -0,0 +1,71 @@ +package pkg + +import ( + "strings" + + "github.com/anchore/syft/internal" + "github.com/anchore/syft/syft/file" +) + +var jenkinsPluginPomPropertiesGroupIDs = []string{ + "io.jenkins.plugins", + "org.jenkins.plugins", + "org.jenkins-ci.plugins", + "io.jenkins-ci.plugins", + "com.cloudbees.jenkins.plugins", +} + +// JavaArchive encapsulates all Java ecosystem metadata for a package as well as an (optional) parent relationship. +type JavaArchive struct { + VirtualPath string `json:"virtualPath" cyclonedx:"virtualPath"` // we need to include the virtual path in cyclonedx documents to prevent deduplication of jars within jars + Manifest *JavaManifest `mapstructure:"Manifest" json:"manifest,omitempty"` + PomProperties *JavaPomProperties `mapstructure:"PomProperties" json:"pomProperties,omitempty" cyclonedx:"-"` + PomProject *JavaPomProject `mapstructure:"PomProject" json:"pomProject,omitempty"` + ArchiveDigests []file.Digest `hash:"ignore" json:"digest,omitempty"` + Parent *Package `hash:"ignore" json:"-"` // note: the parent cannot be included in the minimal definition of uniqueness since this field is not reproducible in an encode-decode cycle (is lossy). +} + +// JavaPomProperties represents the fields of interest extracted from a Java archive's pom.properties file. +type JavaPomProperties struct { + Path string `mapstructure:"path" json:"path"` + Name string `mapstructure:"name" json:"name"` + GroupID string `mapstructure:"groupId" json:"groupId" cyclonedx:"groupID"` + ArtifactID string `mapstructure:"artifactId" json:"artifactId" cyclonedx:"artifactID"` + Version string `mapstructure:"version" json:"version"` + Scope string `mapstructure:"scope" json:"scope,omitempty"` + Extra map[string]string `mapstructure:",remain" json:"extraFields,omitempty"` +} + +// JavaPomProject represents fields of interest extracted from a Java archive's pom.xml file. See https://maven.apache.org/ref/3.6.3/maven-model/maven.html for more details. +type JavaPomProject struct { + Path string `json:"path"` + Parent *JavaPomParent `json:"parent,omitempty"` + GroupID string `json:"groupId"` + ArtifactID string `json:"artifactId"` + Version string `json:"version"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} + +// JavaPomParent contains the fields within the tag in a pom.xml file +type JavaPomParent struct { + GroupID string `json:"groupId"` + ArtifactID string `json:"artifactId"` + Version string `json:"version"` +} + +// PkgTypeIndicated returns the package Type indicated by the data contained in the JavaPomProperties. 
+func (p JavaPomProperties) PkgTypeIndicated() Type { + if internal.HasAnyOfPrefixes(p.GroupID, jenkinsPluginPomPropertiesGroupIDs...) || strings.Contains(p.GroupID, ".jenkins.plugin") { + return JenkinsPluginPkg + } + + return JavaPkg +} + +// JavaManifest represents the fields of interest extracted from a Java archive's META-INF/MANIFEST.MF file. +type JavaManifest struct { + Main map[string]string `json:"main,omitempty"` + NamedSections map[string]map[string]string `json:"namedSections,omitempty"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/java_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/java_metadata.go deleted file mode 100644 index 12b9c5c5..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/java_metadata.go +++ /dev/null @@ -1,71 +0,0 @@ -package pkg - -import ( - "strings" - - "github.com/anchore/syft/internal" - "github.com/anchore/syft/syft/file" -) - -var jenkinsPluginPomPropertiesGroupIDs = []string{ - "io.jenkins.plugins", - "org.jenkins.plugins", - "org.jenkins-ci.plugins", - "io.jenkins-ci.plugins", - "com.cloudbees.jenkins.plugins", -} - -// JavaMetadata encapsulates all Java ecosystem metadata for a package as well as an (optional) parent relationship. -type JavaMetadata struct { - VirtualPath string `json:"virtualPath" cyclonedx:"virtualPath"` // we need to include the virtual path in cyclonedx documents to prevent deduplication of jars within jars - Manifest *JavaManifest `mapstructure:"Manifest" json:"manifest,omitempty"` - PomProperties *PomProperties `mapstructure:"PomProperties" json:"pomProperties,omitempty" cyclonedx:"-"` - PomProject *PomProject `mapstructure:"PomProject" json:"pomProject,omitempty"` - ArchiveDigests []file.Digest `hash:"ignore" json:"digest,omitempty"` - Parent *Package `hash:"ignore" json:"-"` // note: the parent cannot be included in the minimal definition of uniqueness since this field is not reproducible in an encode-decode cycle (is lossy). -} - -// PomProperties represents the fields of interest extracted from a Java archive's pom.properties file. -type PomProperties struct { - Path string `mapstructure:"path" json:"path"` - Name string `mapstructure:"name" json:"name"` - GroupID string `mapstructure:"groupId" json:"groupId" cyclonedx:"groupID"` - ArtifactID string `mapstructure:"artifactId" json:"artifactId" cyclonedx:"artifactID"` - Version string `mapstructure:"version" json:"version"` - Scope string `mapstructure:"scope" json:"scope,omitempty"` - Extra map[string]string `mapstructure:",remain" json:"extraFields,omitempty"` -} - -// PomProject represents fields of interest extracted from a Java archive's pom.xml file. See https://maven.apache.org/ref/3.6.3/maven-model/maven.html for more details. -type PomProject struct { - Path string `json:"path"` - Parent *PomParent `json:"parent,omitempty"` - GroupID string `json:"groupId"` - ArtifactID string `json:"artifactId"` - Version string `json:"version"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} - -// PomParent contains the fields within the tag in a pom.xml file -type PomParent struct { - GroupID string `json:"groupId"` - ArtifactID string `json:"artifactId"` - Version string `json:"version"` -} - -// PkgTypeIndicated returns the package Type indicated by the data contained in the PomProperties. -func (p PomProperties) PkgTypeIndicated() Type { - if internal.HasAnyOfPrefixes(p.GroupID, jenkinsPluginPomPropertiesGroupIDs...) 
|| strings.Contains(p.GroupID, ".jenkins.plugin") { - return JenkinsPluginPkg - } - - return JavaPkg -} - -// JavaManifest represents the fields of interest extracted from a Java archive's META-INF/MANIFEST.MF file. -type JavaManifest struct { - Main map[string]string `json:"main,omitempty"` - NamedSections map[string]map[string]string `json:"namedSections,omitempty"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/linux_kernel_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/linux_kernel.go similarity index 94% rename from vendor/github.com/anchore/syft/syft/pkg/linux_kernel_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/linux_kernel.go index 3f69a284..e2aeb7cd 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/linux_kernel_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/linux_kernel.go @@ -1,7 +1,7 @@ package pkg -// LinuxKernelMetadata represents all captured data for a Linux kernel -type LinuxKernelMetadata struct { +// LinuxKernel represents all captured data for a Linux kernel +type LinuxKernel struct { Name string `mapstructure:"name" json:"name" cyclonedx:"name"` Architecture string `mapstructure:"architecture" json:"architecture" cyclonedx:"architecture"` Version string `mapstructure:"version" json:"version" cyclonedx:"version"` @@ -15,7 +15,7 @@ type LinuxKernelMetadata struct { VideoMode string `mapstructure:"videoMode" json:"videoMode,omitempty" cyclonedx:"videoMode"` } -type LinuxKernelModuleMetadata struct { +type LinuxKernelModule struct { Name string `mapstructure:"name" json:"name,omitempty" cyclonedx:"name"` Version string `mapstructure:"version" json:"version,omitempty" cyclonedx:"version"` SourceVersion string `mapstructure:"sourceVersion" json:"sourceVersion,omitempty" cyclonedx:"sourceVersion"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/metadata.go b/vendor/github.com/anchore/syft/syft/pkg/metadata.go deleted file mode 100644 index ce0594bd..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/metadata.go +++ /dev/null @@ -1,126 +0,0 @@ -package pkg - -import ( - "reflect" -) - -// MetadataType represents the data shape stored within pkg.Package.Metadata. 
-type MetadataType string - -const ( - // this is the full set of data shapes that can be represented within the pkg.Package.Metadata field - - UnknownMetadataType MetadataType = "UnknownMetadata" - AlpmMetadataType MetadataType = "AlpmMetadata" - ApkMetadataType MetadataType = "ApkMetadata" - BinaryMetadataType MetadataType = "BinaryMetadata" - CocoapodsMetadataType MetadataType = "CocoapodsMetadataType" - ConanLockMetadataType MetadataType = "ConanLockMetadataType" - ConanMetadataType MetadataType = "ConanMetadataType" - DartPubMetadataType MetadataType = "DartPubMetadata" - DotnetDepsMetadataType MetadataType = "DotnetDepsMetadata" - DotnetPortableExecutableMetadataType MetadataType = "DotnetPortableExecutableMetadata" - DpkgMetadataType MetadataType = "DpkgMetadata" - GemMetadataType MetadataType = "GemMetadata" - GolangBinMetadataType MetadataType = "GolangBinMetadata" - GolangModMetadataType MetadataType = "GolangModMetadata" - HackageMetadataType MetadataType = "HackageMetadataType" - JavaMetadataType MetadataType = "JavaMetadata" - KbPackageMetadataType MetadataType = "KbPackageMetadata" - LinuxKernelMetadataType MetadataType = "LinuxKernelMetadata" - LinuxKernelModuleMetadataType MetadataType = "LinuxKernelModuleMetadata" - MixLockMetadataType MetadataType = "MixLockMetadataType" - NixStoreMetadataType MetadataType = "NixStoreMetadata" - NpmPackageJSONMetadataType MetadataType = "NpmPackageJsonMetadata" - NpmPackageLockJSONMetadataType MetadataType = "NpmPackageLockJsonMetadata" - PhpComposerJSONMetadataType MetadataType = "PhpComposerJsonMetadata" - PortageMetadataType MetadataType = "PortageMetadata" - PythonPackageMetadataType MetadataType = "PythonPackageMetadata" - PythonPipfileLockMetadataType MetadataType = "PythonPipfileLockMetadata" - PythonRequirementsMetadataType MetadataType = "PythonRequirementsMetadata" - RebarLockMetadataType MetadataType = "RebarLockMetadataType" - RDescriptionFileMetadataType MetadataType = "RDescriptionFileMetadataType" - RpmMetadataType MetadataType = "RpmMetadata" - RustCargoPackageMetadataType MetadataType = "RustCargoPackageMetadata" - SwiftPackageManagerMetadataType MetadataType = "SwiftPackageManagerMetadata" -) - -var AllMetadataTypes = []MetadataType{ - AlpmMetadataType, - ApkMetadataType, - BinaryMetadataType, - CocoapodsMetadataType, - ConanLockMetadataType, - ConanMetadataType, - DartPubMetadataType, - DotnetDepsMetadataType, - DotnetPortableExecutableMetadataType, - DpkgMetadataType, - GemMetadataType, - GolangBinMetadataType, - GolangModMetadataType, - HackageMetadataType, - JavaMetadataType, - KbPackageMetadataType, - LinuxKernelMetadataType, - LinuxKernelModuleMetadataType, - MixLockMetadataType, - NixStoreMetadataType, - NpmPackageJSONMetadataType, - NpmPackageLockJSONMetadataType, - PhpComposerJSONMetadataType, - PortageMetadataType, - PythonPackageMetadataType, - PythonPipfileLockMetadataType, - PythonRequirementsMetadataType, - RDescriptionFileMetadataType, - RebarLockMetadataType, - RpmMetadataType, - RustCargoPackageMetadataType, - SwiftPackageManagerMetadataType, -} - -var MetadataTypeByName = map[MetadataType]reflect.Type{ - AlpmMetadataType: reflect.TypeOf(AlpmMetadata{}), - ApkMetadataType: reflect.TypeOf(ApkMetadata{}), - BinaryMetadataType: reflect.TypeOf(BinaryMetadata{}), - CocoapodsMetadataType: reflect.TypeOf(CocoapodsMetadata{}), - ConanLockMetadataType: reflect.TypeOf(ConanLockMetadata{}), - ConanMetadataType: reflect.TypeOf(ConanMetadata{}), - DartPubMetadataType: reflect.TypeOf(DartPubMetadata{}), - 
DotnetDepsMetadataType: reflect.TypeOf(DotnetDepsMetadata{}), - DotnetPortableExecutableMetadataType: reflect.TypeOf(DotnetPortableExecutableMetadata{}), - DpkgMetadataType: reflect.TypeOf(DpkgMetadata{}), - GemMetadataType: reflect.TypeOf(GemMetadata{}), - GolangBinMetadataType: reflect.TypeOf(GolangBinMetadata{}), - GolangModMetadataType: reflect.TypeOf(GolangModMetadata{}), - HackageMetadataType: reflect.TypeOf(HackageMetadata{}), - JavaMetadataType: reflect.TypeOf(JavaMetadata{}), - KbPackageMetadataType: reflect.TypeOf(KbPackageMetadata{}), - LinuxKernelMetadataType: reflect.TypeOf(LinuxKernelMetadata{}), - LinuxKernelModuleMetadataType: reflect.TypeOf(LinuxKernelModuleMetadata{}), - MixLockMetadataType: reflect.TypeOf(MixLockMetadata{}), - NixStoreMetadataType: reflect.TypeOf(NixStoreMetadata{}), - NpmPackageJSONMetadataType: reflect.TypeOf(NpmPackageJSONMetadata{}), - NpmPackageLockJSONMetadataType: reflect.TypeOf(NpmPackageLockJSONMetadata{}), - PhpComposerJSONMetadataType: reflect.TypeOf(PhpComposerJSONMetadata{}), - PortageMetadataType: reflect.TypeOf(PortageMetadata{}), - PythonPackageMetadataType: reflect.TypeOf(PythonPackageMetadata{}), - PythonPipfileLockMetadataType: reflect.TypeOf(PythonPipfileLockMetadata{}), - PythonRequirementsMetadataType: reflect.TypeOf(PythonRequirementsMetadata{}), - RDescriptionFileMetadataType: reflect.TypeOf(RDescriptionFileMetadata{}), - RebarLockMetadataType: reflect.TypeOf(RebarLockMetadata{}), - RpmMetadataType: reflect.TypeOf(RpmMetadata{}), - RustCargoPackageMetadataType: reflect.TypeOf(CargoPackageMetadata{}), - SwiftPackageManagerMetadataType: reflect.TypeOf(SwiftPackageManagerMetadata{}), -} - -func CleanMetadataType(typ MetadataType) MetadataType { - if typ == "RpmdbMetadata" { - return RpmMetadataType - } - if typ == "GolangMetadata" { - return GolangBinMetadataType - } - return typ -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/kb_package_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/microsoft.go similarity index 77% rename from vendor/github.com/anchore/syft/syft/pkg/kb_package_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/microsoft.go index e243712a..c80229f8 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/kb_package_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/microsoft.go @@ -1,11 +1,11 @@ package pkg -// KbPackageMetadata is slightly odd in how it is expected to map onto data. +// MicrosoftKbPatch is slightly odd in how it is expected to map onto data. // This is critical to grasp because there is no MSRC cataloger. The `ProductID` // field is expected to be the MSRC Product ID, for example: // "Windows 10 Version 1703 for 32-bit Systems". 
// `Kb` is expected to be the actual KB number, for example "5001028" -type KbPackageMetadata struct { +type MicrosoftKbPatch struct { ProductID string `toml:"product_id" json:"product_id"` Kb string `toml:"kb" json:"kb"` } diff --git a/vendor/github.com/anchore/syft/syft/pkg/nix_store_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/nix.go similarity index 88% rename from vendor/github.com/anchore/syft/syft/pkg/nix_store_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/nix.go index 964447f4..88466244 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/nix_store_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/nix.go @@ -6,7 +6,7 @@ import ( "github.com/scylladb/go-set/strset" ) -type NixStoreMetadata struct { +type NixStoreEntry struct { // OutputHash is the prefix of the nix store basename path OutputHash string `mapstructure:"outputHash" json:"outputHash"` @@ -18,7 +18,7 @@ type NixStoreMetadata struct { Files []string `mapstructure:"files" json:"files"` } -func (m NixStoreMetadata) OwnedFiles() (result []string) { +func (m NixStoreEntry) OwnedFiles() (result []string) { result = strset.New(m.Files...).List() sort.Strings(result) return diff --git a/vendor/github.com/anchore/syft/syft/pkg/npm_package_json_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/npm.go similarity index 54% rename from vendor/github.com/anchore/syft/syft/pkg/npm_package_json_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/npm.go index a17f4035..4abeace1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/npm_package_json_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/npm.go @@ -1,7 +1,7 @@ package pkg -// NpmPackageJSONMetadata holds parsing information for a javascript package.json file -type NpmPackageJSONMetadata struct { +// NpmPackage represents the contents of a javascript package.json file. +type NpmPackage struct { Name string `mapstructure:"name" json:"name"` Version string `mapstructure:"version" json:"version"` Author string `mapstructure:"author" json:"author"` @@ -10,3 +10,9 @@ type NpmPackageJSONMetadata struct { URL string `mapstructure:"url" json:"url"` Private bool `mapstructure:"private" json:"private"` } + +// NpmPackageLockEntry represents a single entry within the "packages" section of a package-lock.json file. +type NpmPackageLockEntry struct { + Resolved string `mapstructure:"resolved" json:"resolved"` + Integrity string `mapstructure:"integrity" json:"integrity"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/npm_package_lock_json_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/npm_package_lock_json_metadata.go deleted file mode 100644 index 3d9db0bf..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/npm_package_lock_json_metadata.go +++ /dev/null @@ -1,7 +0,0 @@ -package pkg - -// NpmPackageLockJSONMetadata holds parsing information for a javascript package-lock.json file -type NpmPackageLockJSONMetadata struct { - Resolved string `mapstructure:"resolved" json:"resolved"` - Integrity string `mapstructure:"integrity" json:"integrity"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/package.go b/vendor/github.com/anchore/syft/syft/pkg/package.go index c72e57d3..8ee8d969 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/package.go @@ -17,18 +17,17 @@ import ( // Package represents an application or library that has been bundled into a distributable format. 
// TODO: if we ignore FoundBy for ID generation should we merge the field to show it was found in two places? type Package struct { - id artifact.ID `hash:"ignore"` - Name string // the package name - Version string // the version of the package - FoundBy string `hash:"ignore" cyclonedx:"foundBy"` // the specific cataloger that discovered this package - Locations file.LocationSet // the locations that lead to the discovery of this package (note: this is not necessarily the locations that make up this package) - Licenses LicenseSet // licenses discovered with the package metadata - Language Language `hash:"ignore" cyclonedx:"language"` // the language ecosystem this package belongs to (e.g. JavaScript, Python, etc) - Type Type `cyclonedx:"type"` // the package type (e.g. Npm, Yarn, Python, Rpm, Deb, etc) - CPEs []cpe.CPE `hash:"ignore"` // all possible Common Platform Enumerators (note: this is NOT included in the definition of the ID since all fields on a CPE are derived from other fields) - PURL string `hash:"ignore"` // the Package URL (see https://github.com/package-url/purl-spec) - MetadataType MetadataType `cyclonedx:"metadataType"` // the shape of the additional data in the "metadata" field - Metadata interface{} // additional data found while parsing the package source + id artifact.ID `hash:"ignore"` + Name string // the package name + Version string // the version of the package + FoundBy string `hash:"ignore" cyclonedx:"foundBy"` // the specific cataloger that discovered this package + Locations file.LocationSet // the locations that lead to the discovery of this package (note: this is not necessarily the locations that make up this package) + Licenses LicenseSet // licenses discovered with the package metadata + Language Language `hash:"ignore" cyclonedx:"language"` // the language ecosystem this package belongs to (e.g. JavaScript, Python, etc) + Type Type `cyclonedx:"type"` // the package type (e.g. Npm, Yarn, Python, Rpm, Deb, etc) + CPEs []cpe.CPE `hash:"ignore"` // all possible Common Platform Enumerators (note: this is NOT included in the definition of the ID since all fields on a CPE are derived from other fields) + PURL string `hash:"ignore"` // the Package URL (see https://github.com/package-url/purl-spec) + Metadata interface{} // additional data found while parsing the package source } func (p *Package) OverrideID(id artifact.ID) { diff --git a/vendor/github.com/anchore/syft/syft/pkg/php_composer_json_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/php.go similarity index 82% rename from vendor/github.com/anchore/syft/syft/pkg/php_composer_json_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/php.go index 4bc7528c..3e4fbd41 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/php_composer_json_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/php.go @@ -1,7 +1,10 @@ package pkg -// PhpComposerJSONMetadata represents information found from composer v1/v2 "installed.json" files as well as composer.lock files -type PhpComposerJSONMetadata struct { +// PhpComposerInstalledEntry represents a single package entry from a composer v1/v2 "installed.json" files (very similar to composer.lock files). +type PhpComposerInstalledEntry PhpComposerLockEntry + +// PhpComposerLockEntry represents a single package entry found from a composer.lock file. 
+type PhpComposerLockEntry struct { Name string `json:"name"` Version string `json:"version"` Source PhpComposerExternalReference `json:"source"` diff --git a/vendor/github.com/anchore/syft/syft/pkg/portage_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/portage.go similarity index 74% rename from vendor/github.com/anchore/syft/syft/pkg/portage_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/portage.go index fe709db6..5c9aa1b4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/portage_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/portage.go @@ -8,10 +8,10 @@ import ( "github.com/anchore/syft/syft/file" ) -var _ FileOwner = (*PortageMetadata)(nil) +var _ FileOwner = (*PortageEntry)(nil) -// PortageMetadata represents all captured data for a Package package DB entry. -type PortageMetadata struct { +// PortageEntry represents a single package entry in the portage DB flat-file store. +type PortageEntry struct { InstalledSize int `mapstructure:"InstalledSize" json:"installedSize" cyclonedx:"installedSize"` Files []PortageFileRecord `json:"files"` } @@ -22,7 +22,7 @@ type PortageFileRecord struct { Digest *file.Digest `json:"digest,omitempty"` } -func (m PortageMetadata) OwnedFiles() (result []string) { +func (m PortageEntry) OwnedFiles() (result []string) { s := strset.New() for _, f := range m.Files { if f.Path != "" { diff --git a/vendor/github.com/anchore/syft/syft/pkg/python_package_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/python.go similarity index 55% rename from vendor/github.com/anchore/syft/syft/pkg/python_package_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/python.go index 36d7fe88..1976ea7c 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/python_package_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/python.go @@ -6,7 +6,23 @@ import ( "github.com/scylladb/go-set/strset" ) -var _ FileOwner = (*PythonPackageMetadata)(nil) +var _ FileOwner = (*PythonPackage)(nil) + +// PythonPackage represents all captured data for a python egg or wheel package (specifically as outlined in +// the PyPA core metadata specification https://packaging.python.org/en/latest/specifications/core-metadata/). +// Historically these were defined in PEPs 345, 314, and 241, but have been superseded by PEP 566. This means that this +// struct can (partially) express at least versions 1.0, 1.1, 1.2, 2.1, 2.2, and 2.3 of the metadata format. +type PythonPackage struct { + Name string `json:"name" mapstruct:"Name"` + Version string `json:"version" mapstruct:"Version"` + Author string `json:"author" mapstruct:"Author"` + AuthorEmail string `json:"authorEmail" mapstruct:"Authoremail"` + Platform string `json:"platform" mapstruct:"Platform"` + Files []PythonFileRecord `json:"files,omitempty"` + SitePackagesRootPath string `json:"sitePackagesRootPath"` + TopLevelPackages []string `json:"topLevelPackages,omitempty"` + DirectURLOrigin *PythonDirectURLOriginInfo `json:"directUrlOrigin,omitempty"` +} // PythonFileDigest represents the file metadata for a single file attributed to a python package. type PythonFileDigest struct { @@ -27,41 +43,7 @@ type PythonDirectURLOriginInfo struct { VCS string `json:"vcs,omitempty"` } -// PythonPackageMetadata represents all captured data for a python egg or wheel package. 
-type PythonPackageMetadata struct { - Name string `json:"name" mapstruct:"Name"` - Version string `json:"version" mapstruct:"Version"` - Author string `json:"author" mapstruct:"Author"` - AuthorEmail string `json:"authorEmail" mapstruct:"Authoremail"` - Platform string `json:"platform" mapstruct:"Platform"` - Files []PythonFileRecord `json:"files,omitempty"` - SitePackagesRootPath string `json:"sitePackagesRootPath"` - TopLevelPackages []string `json:"topLevelPackages,omitempty"` - DirectURLOrigin *PythonDirectURLOriginInfo `json:"directUrlOrigin,omitempty"` -} - -type DirectURLOrigin struct { - URL string `json:"url"` - VCSInfo VCSInfo `json:"vcs_info"` - ArchiveInfo ArchiveInfo `json:"archive_info"` - DirInfo DirInfo `json:"dir_info"` -} - -type DirInfo struct { - Editable bool `json:"editable"` -} - -type ArchiveInfo struct { - Hash string `json:"hash"` -} - -type VCSInfo struct { - CommitID string `json:"commit_id"` - VCS string `json:"vcs"` - RequestedRevision string `json:"requested_revision"` -} - -func (m PythonPackageMetadata) OwnedFiles() (result []string) { +func (m PythonPackage) OwnedFiles() (result []string) { s := strset.New() for _, f := range m.Files { if f.Path != "" { @@ -72,3 +54,18 @@ func (m PythonPackageMetadata) OwnedFiles() (result []string) { sort.Strings(result) return result } + +// PythonPipfileLockEntry represents a single package entry within a Pipfile.lock file. +type PythonPipfileLockEntry struct { + Hashes []string `mapstructure:"hashes" json:"hashes"` + Index string `mapstructure:"index" json:"index"` +} + +// PythonRequirementsEntry represents a single entry within a [*-]requirements.txt file. +type PythonRequirementsEntry struct { + Name string `json:"name" mapstruct:"Name"` + Extras []string `json:"extras,omitempty" mapstruct:"Extras"` + VersionConstraint string `json:"versionConstraint" mapstruct:"VersionConstraint"` + URL string `json:"url,omitempty" mapstruct:"URL"` + Markers string `json:"markers,omitempty" mapstruct:"Markers"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/python_pipefile_lock_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/python_pipefile_lock_metadata.go deleted file mode 100644 index 07233d95..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/python_pipefile_lock_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -package pkg - -type PythonPipfileLockMetadata struct { - Hashes []string `mapstructure:"hashes" json:"hashes"` - Index string `mapstructure:"index" json:"index"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go deleted file mode 100644 index 161669a4..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go +++ /dev/null @@ -1,9 +0,0 @@ -package pkg - -type PythonRequirementsMetadata struct { - Name string `json:"name" mapstruct:"Name"` - Extras []string `json:"extras,omitempty" mapstruct:"Extras"` - VersionConstraint string `json:"versionConstraint" mapstruct:"VersionConstraint"` - URL string `json:"url,omitempty" mapstruct:"URL"` - Markers string `json:"markers,omitempty" mapstruct:"Markers"` -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/r_package_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/r.go similarity index 95% rename from vendor/github.com/anchore/syft/syft/pkg/r_package_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/r.go index 2a3ad320..e9c65c1f 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/r_package_metadata.go +++ 
b/vendor/github.com/anchore/syft/syft/pkg/r.go @@ -1,6 +1,6 @@ package pkg -type RDescriptionFileMetadata struct { +type RDescription struct { /* Fields chosen by: docker run --rm -it rocker/r-ver bash diff --git a/vendor/github.com/anchore/syft/syft/pkg/rpm.go b/vendor/github.com/anchore/syft/syft/pkg/rpm.go new file mode 100644 index 00000000..0634e22b --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/rpm.go @@ -0,0 +1,65 @@ +package pkg + +import ( + "sort" + + "github.com/scylladb/go-set/strset" + + "github.com/anchore/syft/syft/file" +) + +// RpmDBGlob is the glob pattern used to find RPM DB files. Where: +// - /var/lib/rpm/... is the typical path for most distributions +// - /usr/share/rpm/... is common for rpm-ostree distributions (coreos-like) +// - Packages is the legacy Berkeley db based format +// - Packages.db is the "ndb" format used in SUSE +// - rpmdb.sqlite is the sqlite format used in fedora + derivates +const RpmDBGlob = "**/{var/lib,usr/share,usr/lib/sysimage}/rpm/{Packages,Packages.db,rpmdb.sqlite}" + +// RpmManifestGlob is used in CBL-Mariner distroless images +const RpmManifestGlob = "**/var/lib/rpmmanifest/container-manifest-2" + +var _ FileOwner = (*RpmDBEntry)(nil) + +// RpmArchive represents all captured data from a RPM package archive. +type RpmArchive RpmDBEntry + +// RpmDBEntry represents all captured data from a RPM DB package entry. +type RpmDBEntry struct { + Name string `json:"name"` + Version string `json:"version"` + Epoch *int `json:"epoch" cyclonedx:"epoch" jsonschema:"nullable"` + Arch string `json:"architecture"` + Release string `json:"release" cyclonedx:"release"` + SourceRpm string `json:"sourceRpm" cyclonedx:"sourceRpm"` + Size int `json:"size" cyclonedx:"size"` + Vendor string `json:"vendor"` + ModularityLabel string `json:"modularityLabel"` + Files []RpmFileRecord `json:"files"` +} + +// RpmFileRecord represents the file metadata for a single file attributed to a RPM package. +type RpmFileRecord struct { + Path string `json:"path"` + Mode RpmFileMode `json:"mode"` + Size int `json:"size"` + Digest file.Digest `json:"digest"` + UserName string `json:"userName"` + GroupName string `json:"groupName"` + Flags string `json:"flags"` +} + +// RpmFileMode is the raw file mode for a single file. This can be interpreted as the linux stat.h mode (see https://pubs.opengroup.org/onlinepubs/007908799/xsh/sysstat.h.html) +type RpmFileMode uint16 + +func (m RpmDBEntry) OwnedFiles() (result []string) { + s := strset.New() + for _, f := range m.Files { + if f.Path != "" { + s.Add(f.Path) + } + } + result = s.List() + sort.Strings(result) + return result +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go deleted file mode 100644 index e2442787..00000000 --- a/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -package pkg - -import ( - "sort" - - "github.com/scylladb/go-set/strset" - - "github.com/anchore/syft/syft/file" -) - -// /var/lib/rpm/... is the typical path for most distributions -// /usr/share/rpm/... 
is common for rpm-ostree distributions (coreos-like) -// Packages is the legacy Berkely db based format -// Packages.db is the "ndb" format used in SUSE -// rpmdb.sqlite is the sqlite format used in fedora + derivates -const RpmDBGlob = "**/{var/lib,usr/share,usr/lib/sysimage}/rpm/{Packages,Packages.db,rpmdb.sqlite}" - -// Used in CBL-Mariner distroless images -const RpmManifestGlob = "**/var/lib/rpmmanifest/container-manifest-2" - -var _ FileOwner = (*RpmMetadata)(nil) - -// RpmMetadata represents all captured data for a RPM DB package entry. -type RpmMetadata struct { - Name string `json:"name"` - Version string `json:"version"` - Epoch *int `json:"epoch" cyclonedx:"epoch" jsonschema:"nullable"` - Arch string `json:"architecture"` - Release string `json:"release" cyclonedx:"release"` - SourceRpm string `json:"sourceRpm" cyclonedx:"sourceRpm"` - Size int `json:"size" cyclonedx:"size"` - Vendor string `json:"vendor"` - ModularityLabel string `json:"modularityLabel"` - Files []RpmdbFileRecord `json:"files"` -} - -// RpmdbFileRecord represents the file metadata for a single file attributed to a RPM package. -type RpmdbFileRecord struct { - Path string `json:"path"` - Mode RpmdbFileMode `json:"mode"` - Size int `json:"size"` - Digest file.Digest `json:"digest"` - UserName string `json:"userName"` - GroupName string `json:"groupName"` - Flags string `json:"flags"` -} - -// RpmdbFileMode is the raw file mode for a single file. This can be interpreted as the linux stat.h mode (see https://pubs.opengroup.org/onlinepubs/007908799/xsh/sysstat.h.html) -type RpmdbFileMode uint16 - -func (m RpmMetadata) OwnedFiles() (result []string) { - s := strset.New() - for _, f := range m.Files { - if f.Path != "" { - s.Add(f.Path) - } - } - result = s.List() - sort.Strings(result) - return result -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/gem_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/ruby.go similarity index 92% rename from vendor/github.com/anchore/syft/syft/pkg/gem_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/ruby.go index 01b05eec..81280c54 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/gem_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/ruby.go @@ -1,7 +1,7 @@ package pkg -// GemMetadata represents all metadata parsed from the gemspec file -type GemMetadata struct { +// RubyGemspec represents all metadata parsed from the *.gemspec file +type RubyGemspec struct { Name string `mapstructure:"name" json:"name"` Version string `mapstructure:"version" json:"version"` // note regarding if Files can contribute to GemMetadata being able to implement FileOwner: this list is a diff --git a/vendor/github.com/anchore/syft/syft/pkg/cargo_package_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/rust.go similarity index 58% rename from vendor/github.com/anchore/syft/syft/pkg/cargo_package_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/rust.go index 1c23bc5e..c06c65fe 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cargo_package_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/rust.go @@ -1,9 +1,15 @@ package pkg -type CargoPackageMetadata struct { +type RustCargoLockEntry struct { Name string `toml:"name" json:"name"` Version string `toml:"version" json:"version"` Source string `toml:"source" json:"source"` Checksum string `toml:"checksum" json:"checksum"` Dependencies []string `toml:"dependencies" json:"dependencies"` } + +type RustBinaryAuditEntry struct { + Name string `toml:"name" json:"name"` + Version string `toml:"version" 
json:"version"` + Source string `toml:"source" json:"source"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/swift.go similarity index 61% rename from vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go rename to vendor/github.com/anchore/syft/syft/pkg/swift.go index fd33a8d6..3df696c1 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/swift.go @@ -1,5 +1,5 @@ package pkg -type SwiftPackageManagerMetadata struct { +type SwiftPackageManagerResolvedEntry struct { Revision string `mapstructure:"revision" json:"revision"` } diff --git a/vendor/github.com/anchore/syft/syft/sbom/format.go b/vendor/github.com/anchore/syft/syft/sbom/format.go index 5247845b..94901ffc 100644 --- a/vendor/github.com/anchore/syft/syft/sbom/format.go +++ b/vendor/github.com/anchore/syft/syft/sbom/format.go @@ -1,17 +1,9 @@ package sbom import ( - "errors" - "fmt" "io" ) -var ( - ErrEncodingNotSupported = errors.New("encoding not supported") - ErrDecodingNotSupported = errors.New("decoding not supported") - ErrValidationNotSupported = errors.New("validation not supported") -) - type FormatID string // String returns a string representation of the FormatID. @@ -21,88 +13,19 @@ func (f FormatID) String() string { const AnyVersion = "" -type Format interface { +type FormatEncoder interface { ID() FormatID - IDs() []FormatID + Aliases() []string Version() string Encode(io.Writer, SBOM) error - Decode(io.Reader) (*SBOM, error) - Validate(io.Reader) error - fmt.Stringer -} - -type format struct { - ids []FormatID - version string - encoder Encoder - decoder Decoder - validator Validator -} - -func (f format) IDs() []FormatID { - return f.ids -} - -func (f format) Version() string { - return f.version -} - -func (f format) String() string { - if f.version == AnyVersion { - return f.ID().String() - } - return fmt.Sprintf("%s@%s", f.ID(), f.version) -} - -// Decoder is a function that can convert an SBOM document of a specific format from a reader into Syft native objects. -type Decoder func(reader io.Reader) (*SBOM, error) - -// Encoder is a function that can transform Syft native objects into an SBOM document of a specific format written to the given writer. -type Encoder func(io.Writer, SBOM) error - -// Validator reads the SBOM from the given reader and assesses whether the document conforms to the specific SBOM format. -// The validator should positively confirm if the SBOM is not only the format but also has the minimal set of values -// that the format requires. For example, all syftjson formatted documents have a schema section which should have -// "anchore/syft" within the version --if this isn't found then the validator should raise an error. These active -// assertions protect against "simple" format decoding validations that may lead to false positives (e.g. 
I decoded -// json successfully therefore this must be the target format, however, all values are their default zero-value and -// really represent a different format that also uses json) -type Validator func(reader io.Reader) error - -func NewFormat(version string, encoder Encoder, decoder Decoder, validator Validator, ids ...FormatID) Format { - return format{ - ids: ids, - version: version, - encoder: encoder, - decoder: decoder, - validator: validator, - } } -func (f format) ID() FormatID { - return f.ids[0] -} +type FormatDecoder interface { + // Decode will return an SBOM from the given reader. If the bytes are not a valid SBOM for the given format + // then an error will be returned. + Decode(io.ReadSeeker) (*SBOM, FormatID, string, error) -func (f format) Encode(output io.Writer, s SBOM) error { - if f.encoder == nil { - return ErrEncodingNotSupported - } - return f.encoder(output, s) + // Identify will return the format ID and version for the given reader. Note: this does not validate the + // full SBOM, only pulls the minimal information necessary to identify the format. + Identify(io.ReadSeeker) (FormatID, string) } - -func (f format) Decode(reader io.Reader) (*SBOM, error) { - if f.decoder == nil { - return nil, ErrDecodingNotSupported - } - return f.decoder(reader) -} - -func (f format) Validate(reader io.Reader) error { - if f.validator == nil { - return ErrValidationNotSupported - } - - return f.validator(reader) -} - -var _ Format = (*format)(nil) diff --git a/vendor/github.com/anchore/syft/syft/source/file_source.go b/vendor/github.com/anchore/syft/syft/source/file_source.go index 5adc81d9..481a015e 100644 --- a/vendor/github.com/anchore/syft/syft/source/file_source.go +++ b/vendor/github.com/anchore/syft/syft/source/file_source.go @@ -108,10 +108,6 @@ func deriveIDFromFile(cfg FileConfig) (artifact.ID, string) { info += fmt.Sprintf(":%s@%s", cfg.Alias.Name, cfg.Alias.Version) } - if d != "" { - d = fmt.Sprintf("sha256:%s", d) - } - return artifactIDFromDigest(digest.SHA256.FromString(info).String()), d } diff --git a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_metadata.go b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_metadata.go index ade4f592..8637995f 100644 --- a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_metadata.go +++ b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_metadata.go @@ -18,6 +18,7 @@ type StereoscopeImageSourceMetadata struct { Architecture string `json:"architecture"` Variant string `json:"architectureVariant,omitempty"` OS string `json:"os"` + Labels map[string]string `json:"labels,omitempty"` } // StereoscopeLayerMetadata represents all static metadata that defines what a container image layer is. 
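The sbom/format.go hunk above replaces the old Format/Validator machinery with separate FormatEncoder and FormatDecoder interfaces, and Decode now takes an io.ReadSeeker while also reporting the detected format ID and version. Below is a rough, self-contained sketch of why a seekable reader helps: a decoder can sniff the head of the stream, rewind, and then decode the same bytes, which is also the reason the SBOM cataloger above gained adaptToReadSeeker. All concrete types here are mock stand-ins invented for illustration, not syft's real implementations.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// formatID and sbomDoc are toy placeholders that mirror the shape of the new decoder contract.
type formatID string

type sbomDoc struct{ Packages int }

type formatDecoder interface {
	Identify(io.ReadSeeker) (formatID, string)
	Decode(io.ReadSeeker) (*sbomDoc, formatID, string, error)
}

type mockJSONDecoder struct{}

func (mockJSONDecoder) Identify(r io.ReadSeeker) (formatID, string) {
	// peek at the head of the stream to guess the format...
	head := make([]byte, 32)
	n, _ := r.Read(head)
	// ...then rewind so a later Decode call sees the full document again
	_, _ = r.Seek(0, io.SeekStart)
	if strings.Contains(string(head[:n]), `"descriptor"`) {
		return "mock-json", "1.0"
	}
	return "", ""
}

func (d mockJSONDecoder) Decode(r io.ReadSeeker) (*sbomDoc, formatID, string, error) {
	id, version := d.Identify(r)
	if id == "" {
		return nil, "", "", fmt.Errorf("unrecognized SBOM format")
	}
	// a real decoder would now parse the whole stream from the start
	return &sbomDoc{Packages: 1}, id, version, nil
}

func main() {
	var dec formatDecoder = mockJSONDecoder{}
	doc, id, version, err := dec.Decode(bytes.NewReader([]byte(`{"descriptor":{"name":"example"}}`)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d package(s) as %s@%s\n", doc.Packages, id, version)
}

A reader that is not already seekable can be buffered into a bytes.Reader first, exactly as the cataloger's fallback path does.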
@@ -48,6 +49,7 @@ func NewStereoscopeImageMetadata(img *image.Image, userInput string) Stereoscope Architecture: img.Metadata.Architecture, Variant: img.Metadata.Variant, OS: img.Metadata.OS, + Labels: img.Metadata.Config.Config.Labels, } // populate image metadata diff --git a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go index fc5defee..53bca002 100644 --- a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go +++ b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go @@ -192,6 +192,7 @@ func imageMetadataFromStereoscopeImage(img *image.Image, reference string) Stere Architecture: img.Metadata.Architecture, Variant: img.Metadata.Variant, OS: img.Metadata.OS, + Labels: img.Metadata.Config.Config.Labels, } } diff --git a/vendor/github.com/bmatcuk/doublestar/v4/README.md b/vendor/github.com/bmatcuk/doublestar/v4/README.md index be715ff6..70117eff 100644 --- a/vendor/github.com/bmatcuk/doublestar/v4/README.md +++ b/vendor/github.com/bmatcuk/doublestar/v4/README.md @@ -387,8 +387,6 @@ ever since. In that time, it has grown into one of the most popular globbing libraries in the Go ecosystem. So, if **doublestar** is a useful library in your project, consider [sponsoring] my work! I'd really appreciate it! -[![reviewpad](../sponsors/reviewpad.png?raw=true)](https://reviewpad.com/) - Thanks for sponsoring me! ## License diff --git a/vendor/github.com/bmatcuk/doublestar/v4/match.go b/vendor/github.com/bmatcuk/doublestar/v4/match.go index 6581d998..4232c79f 100644 --- a/vendor/github.com/bmatcuk/doublestar/v4/match.go +++ b/vendor/github.com/bmatcuk/doublestar/v4/match.go @@ -301,9 +301,14 @@ MATCH: } func isZeroLengthPattern(pattern string, separator rune) (ret bool, err error) { - // `/**` is a special case - a pattern such as `path/to/a/**` *should* match - // `path/to/a` because `a` might be a directory - if pattern == "" || pattern == "*" || pattern == "**" || pattern == string(separator)+"**" { + // `/**`, `**/`, and `/**/` are special cases - a pattern such as `path/to/a/**` or `path/to/a/**/` + // *should* match `path/to/a` because `a` might be a directory + if pattern == "" || + pattern == "*" || + pattern == "**" || + pattern == string(separator)+"**" || + pattern == "**"+string(separator) || + pattern == string(separator)+"**"+string(separator) { return true, nil } diff --git a/vendor/github.com/containerd/containerd/.cirrus.yml b/vendor/github.com/containerd/containerd/.cirrus.yml index 1252edff..db7d2dd9 100644 --- a/vendor/github.com/containerd/containerd/.cirrus.yml +++ b/vendor/github.com/containerd/containerd/.cirrus.yml @@ -26,8 +26,9 @@ task: # v7.0.0 does not boot. v6.0.0 was not released. BOX: rockylinux/8@5.0.0 install_libvirt_vagrant_script: | - apt-get update - apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + # if another process is keeping a lock, wait for 60 seconds for it to release the lock. + apt-get -o DPkg::Lock::Timeout=60 update + apt-get -o DPkg::Lock::Timeout=60 install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt systemctl enable --now libvirtd vagrant_cache: @@ -61,8 +62,9 @@ task: memory: 16G install_libvirt_vagrant_script: | - apt-get update - apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + # if another process is keeping a lock, wait for 60 seconds for it to release the lock. 
+ apt-get -o DPkg::Lock::Timeout=60 update + apt-get -o DPkg::Lock::Timeout=60 install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt systemctl enable --now libvirtd vagrant_cache: diff --git a/vendor/github.com/containerd/containerd/.golangci.yml b/vendor/github.com/containerd/containerd/.golangci.yml index e52c4ffb..efd8df64 100644 --- a/vendor/github.com/containerd/containerd/.golangci.yml +++ b/vendor/github.com/containerd/containerd/.golangci.yml @@ -33,6 +33,15 @@ issues: # conversion is necessary on Linux, unnecessary on macOS text: "unnecessary conversion" + # FIXME temporarily suppress deprecation warnings for the logs package. See https://github.com/containerd/containerd/pull/9086 + - text: "SA1019: log\\.(G|L|Fields|Entry|RFC3339NanoFixed|Level|TraceLevel|DebugLevel|InfoLevel|WarnLevel|ErrorLevel|FatalLevel|PanicLevel|SetLevel|GetLevel|OutputFormat|TextFormat|JSONFormat|SetFormat|WithLogger|GetLogger)" + linters: + - staticcheck + - text: "SA1019: logtest\\.WithT" + linters: + - staticcheck + + linters-settings: gosec: # The following issues surfaced when `gosec` linter diff --git a/vendor/github.com/containerd/containerd/.mailmap b/vendor/github.com/containerd/containerd/.mailmap index 87221c9a..34281d42 100644 --- a/vendor/github.com/containerd/containerd/.mailmap +++ b/vendor/github.com/containerd/containerd/.mailmap @@ -76,10 +76,12 @@ Justin Terry Justin Terry Kante Kazuyoshi Kato +Kazuyoshi Kato Kenfe-Mickaël Laventure Kevin Kern Kevin Parsons Kevin Xu +Kirtana Ashok Kitt Hsu Kohei Tokunaga Krasi Georgiev @@ -164,3 +166,5 @@ Zhoulin Xie Zhoulin Xie <42261994+JoeWrightss@users.noreply.github.com> zounengren 张潇 +Kazuyoshi Kato +Andrey Epifanov diff --git a/vendor/github.com/containerd/containerd/Makefile b/vendor/github.com/containerd/containerd/Makefile index 02a8aa20..905dfeda 100644 --- a/vendor/github.com/containerd/containerd/Makefile +++ b/vendor/github.com/containerd/containerd/Makefile @@ -234,6 +234,11 @@ bin/cni-bridge-fp: integration/failpoint/cmd/cni-bridge-fp FORCE @echo "$(WHALE) $@" @$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/cni-bridge-fp +# build runc-fp as runc wrapper to support failpoint, only used by integration test +bin/runc-fp: integration/failpoint/cmd/runc-fp FORCE + @echo "$(WHALE) $@" + @$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/runc-fp + benchmark: ## run benchmarks tests @echo "$(WHALE) $@" @$(GO) test ${TESTFLAGS} -bench . -run Benchmark -test.root diff --git a/vendor/github.com/containerd/containerd/RELEASES.md b/vendor/github.com/containerd/containerd/RELEASES.md index 24e8e893..26855b25 100644 --- a/vendor/github.com/containerd/containerd/RELEASES.md +++ b/vendor/github.com/containerd/containerd/RELEASES.md @@ -386,7 +386,6 @@ The deprecated properties in [`config.toml`](./docs/cri/config.md) are shown in | Property Group | Property | Deprecation release | Target release for removal | Recommendation | |----------------------------------------------------------------------|------------------------------|---------------------|----------------------------|-------------------------------------------------| |`[plugins."io.containerd.grpc.v1.cri"]` | `systemd_cgroup` | containerd v1.3 | containerd v2.0 | Use `SystemdCgroup` in runc options (see below) | -|`[plugins."io.containerd.grpc.v1.cri".cni]` | `conf_template` | containerd v1.? 
| containerd v2.0 | Create a CNI config in `/etc/cni/net.d` | |`[plugins."io.containerd.grpc.v1.cri".containerd]` | `untrusted_workload_runtime` | containerd v1.2 | containerd v2.0 | Create `untrusted` runtime in `runtimes` | |`[plugins."io.containerd.grpc.v1.cri".containerd]` | `default_runtime` | containerd v1.3 | containerd v2.0 | Use `default_runtime_name` | |`[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.*]` | `runtime_engine` | containerd v1.3 | containerd v2.0 | Use runtime v2 | @@ -396,6 +395,11 @@ The deprecated properties in [`config.toml`](./docs/cri/config.md) are shown in |`[plugins."io.containerd.grpc.v1.cri".registry]` | `configs` | containerd v1.5 | containerd v2.0 | Use [`config_path`](./docs/hosts.md) | |`[plugins."io.containerd.grpc.v1.cri".registry]` | `mirrors` | containerd v1.5 | containerd v2.0 | Use [`config_path`](./docs/hosts.md) | +> **Note** +> +> CNI Config Template (`plugins."io.containerd.grpc.v1.cri".cni.conf_template`) was once deprecated in v1.7.0, +> but its deprecation was cancelled in v1.7.3. +
Example: runc option SystemdCgroup

```toml diff --git a/vendor/github.com/containerd/containerd/Vagrantfile b/vendor/github.com/containerd/containerd/Vagrantfile index 10947b0c..7b7a3ad0 100644 --- a/vendor/github.com/containerd/containerd/Vagrantfile +++ b/vendor/github.com/containerd/containerd/Vagrantfile @@ -78,6 +78,7 @@ Vagrant.configure("2") do |config| libselinux-devel \ lsof \ make \ + strace \ ${INSTALL_PACKAGES} SHELL end @@ -101,7 +102,7 @@ EOF config.vm.provision "install-golang", type: "shell", run: "once" do |sh| sh.upload_path = "/tmp/vagrant-install-golang" sh.env = { - 'GO_VERSION': ENV['GO_VERSION'] || "1.20.2", + 'GO_VERSION': ENV['GO_VERSION'] || "1.20.10", } sh.inline = <<~SHELL #!/usr/bin/env bash diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index f85ec174..11724f8d 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -166,6 +166,15 @@ func NewAttach(opts ...Opt) Attach { if fifos == nil { return nil, fmt.Errorf("cannot attach, missing fifos") } + if streams.Stdin == nil { + fifos.Stdin = "" + } + if streams.Stdout == nil { + fifos.Stdout = "" + } + if streams.Stderr == nil { + fifos.Stderr = "" + } return copyIO(fifos, streams) } } diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index fddbe58c..9dc21dcc 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -98,7 +98,14 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { config: fifos.Config, wg: wg, closers: append(pipes.closers(), fifos), - cancel: cancel, + cancel: func() { + cancel() + for _, c := range pipes.closers() { + if c != nil { + c.Close() + } + } + }, }, nil } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 2890bfdc..7863b742 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -281,6 +281,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N }) } } + request.RuntimePath = info.RuntimePath if info.Options != nil { any, err := typeurl.MarshalAny(info.Options) if err != nil { diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index b7230d88..8eb1a169 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -87,9 +87,6 @@ type IngestManager interface { } // Info holds content specific information -// -// TODO(stevvooe): Consider a very different name for this struct. Info is way -// to general. It also reads very weird in certain context, like pluralization. type Info struct { Digest digest.Digest Size int64 @@ -111,12 +108,17 @@ type Status struct { // WalkFunc defines the callback for a blob walk. type WalkFunc func(Info) error -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { +// InfoProvider provides info for content inspection. +type InfoProvider interface { // Info will return metadata about content available in the content store. // // If the content is not present, ErrNotFound will be returned. Info(ctx context.Context, dgst digest.Digest) (Info, error) +} + +// Manager provides methods for inspecting, listing and removing content. 
+type Manager interface { + InfoProvider // Update updates mutable information related to content. // If one or more fieldpaths are provided, only those diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 6e3ae2f5..0b1d44ed 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -17,19 +17,9 @@ package containerd import ( - "context" - diffapi "github.com/containerd/containerd/api/services/diff/v1" - "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/pkg/epoch" - "github.com/containerd/containerd/protobuf" - ptypes "github.com/containerd/containerd/protobuf/types" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/containerd/containerd/diff/proxy" ) // DiffService handles the computation and application of diffs @@ -41,96 +31,5 @@ type DiffService interface { // NewDiffServiceFromClient returns a new diff service which communicates // over a GRPC connection. func NewDiffServiceFromClient(client diffapi.DiffClient) DiffService { - return &diffRemote{ - client: client, - } -} - -type diffRemote struct { - client diffapi.DiffClient -} - -func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { - var config diff.ApplyConfig - for _, opt := range opts { - if err := opt(ctx, desc, &config); err != nil { - return ocispec.Descriptor{}, err - } - } - - payloads := make(map[string]*ptypes.Any) - for k, v := range config.ProcessorPayloads { - payloads[k] = protobuf.FromAny(v) - } - - req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(desc), - Mounts: fromMounts(mounts), - Payloads: payloads, - } - resp, err := r.client.Apply(ctx, req) - if err != nil { - return ocispec.Descriptor{}, errdefs.FromGRPC(err) - } - return toDescriptor(resp.Applied), nil -} - -func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { - var config diff.Config - for _, opt := range opts { - if err := opt(&config); err != nil { - return ocispec.Descriptor{}, err - } - } - if tm := epoch.FromContext(ctx); tm != nil && config.SourceDateEpoch == nil { - config.SourceDateEpoch = tm - } - var sourceDateEpoch *timestamppb.Timestamp - if config.SourceDateEpoch != nil { - sourceDateEpoch = timestamppb.New(*config.SourceDateEpoch) - } - req := &diffapi.DiffRequest{ - Left: fromMounts(a), - Right: fromMounts(b), - MediaType: config.MediaType, - Ref: config.Reference, - Labels: config.Labels, - SourceDateEpoch: sourceDateEpoch, - } - resp, err := r.client.Diff(ctx, req) - if err != nil { - return ocispec.Descriptor{}, errdefs.FromGRPC(err) - } - return toDescriptor(resp.Diff), nil -} - -func toDescriptor(d *types.Descriptor) ocispec.Descriptor { - return ocispec.Descriptor{ - MediaType: d.MediaType, - Digest: digest.Digest(d.Digest), - Size: d.Size, - Annotations: d.Annotations, - } -} - -func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { - return &types.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest.String(), - Size: d.Size, - Annotations: d.Annotations, - } -} - -func fromMounts(mounts []mount.Mount) []*types.Mount { - apiMounts := make([]*types.Mount, len(mounts)) - 
for i, m := range mounts { - apiMounts[i] = &types.Mount{ - Type: m.Type, - Source: m.Source, - Target: m.Target, - Options: m.Options, - } - } - return apiMounts + return proxy.NewDiffApplier(client).(DiffService) } diff --git a/vendor/github.com/containerd/containerd/diff/proxy/differ.go b/vendor/github.com/containerd/containerd/diff/proxy/differ.go new file mode 100644 index 00000000..8ed8bdf4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/proxy/differ.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + + diffapi "github.com/containerd/containerd/api/services/diff/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/epoch" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +// NewDiffApplier returns a new comparer and applier which communicates +// over a GRPC connection. 
+func NewDiffApplier(client diffapi.DiffClient) interface{} { + return &diffRemote{ + client: client, + } +} + +type diffRemote struct { + client diffapi.DiffClient +} + +func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { + var config diff.ApplyConfig + for _, opt := range opts { + if err := opt(ctx, desc, &config); err != nil { + return ocispec.Descriptor{}, err + } + } + + payloads := make(map[string]*ptypes.Any) + for k, v := range config.ProcessorPayloads { + payloads[k] = protobuf.FromAny(v) + } + + req := &diffapi.ApplyRequest{ + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: payloads, + } + resp, err := r.client.Apply(ctx, req) + if err != nil { + return ocispec.Descriptor{}, errdefs.FromGRPC(err) + } + return toDescriptor(resp.Applied), nil +} + +func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return ocispec.Descriptor{}, err + } + } + if tm := epoch.FromContext(ctx); tm != nil && config.SourceDateEpoch == nil { + config.SourceDateEpoch = tm + } + var sourceDateEpoch *timestamppb.Timestamp + if config.SourceDateEpoch != nil { + sourceDateEpoch = timestamppb.New(*config.SourceDateEpoch) + } + req := &diffapi.DiffRequest{ + Left: fromMounts(a), + Right: fromMounts(b), + MediaType: config.MediaType, + Ref: config.Reference, + Labels: config.Labels, + SourceDateEpoch: sourceDateEpoch, + } + resp, err := r.client.Diff(ctx, req) + if err != nil { + return ocispec.Descriptor{}, errdefs.FromGRPC(err) + } + return toDescriptor(resp.Diff), nil +} + +func toDescriptor(d *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: d.MediaType, + Digest: digest.Digest(d.Digest), + Size: d.Size, + Annotations: d.Annotations, + } +} + +func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest.String(), + Size: d.Size, + Annotations: d.Annotations, + } +} + +func fromMounts(mounts []mount.Mount) []*types.Mount { + apiMounts := make([]*types.Mount, len(mounts)) + for i, m := range mounts { + apiMounts[i] = &types.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + Options: m.Options, + } + } + return apiMounts +} diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 40dc3ff6..46854fc4 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -437,7 +437,15 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer, if err != nil { return nil, fmt.Errorf("failed to resolve rootfs: %w", err) } - if len(diffIDs) != len(manifest.Layers) { + + // parse out the image layers from oci artifact layers + imageLayers := []ocispec.Descriptor{} + for _, ociLayer := range manifest.Layers { + if images.IsLayerType(ociLayer.MediaType) { + imageLayers = append(imageLayers, ociLayer) + } + } + if len(diffIDs) != len(imageLayers) { return nil, errors.New("mismatched image rootfs and manifest layers") } layers := make([]rootfs.Layer, len(diffIDs)) @@ -447,7 +455,7 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer, MediaType: ocispec.MediaTypeImageLayer, Digest: diffIDs[i], } - layers[i].Blob = manifest.Layers[i] + layers[i].Blob = imageLayers[i] } 
return layers, nil } diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 87858a95..b1430226 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -188,7 +188,7 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts } name := desc.Annotations[images.AnnotationImageName] - if name != "" && !eo.skipDockerManifest { + if name != "" { mt.names = append(mt.names, name) } case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: @@ -227,26 +227,24 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts records = append(records, r...) } - if !eo.skipDockerManifest { - if len(manifests) >= 1 { - if len(manifests) > 1 { - sort.SliceStable(manifests, func(i, j int) bool { - if manifests[i].Platform == nil { - return false - } - if manifests[j].Platform == nil { - return true - } - return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) - }) - } - d = manifests[0].Digest - dManifests[d] = &exportManifest{ - manifest: manifests[0], - } - } else if eo.platform != nil { - return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound) + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) + } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], } + } else if eo.platform != nil { + return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound) } resolvedIndex[desc.Digest] = d } @@ -262,7 +260,7 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts } } - if len(dManifests) > 0 { + if !eo.skipDockerManifest && len(dManifests) > 0 { tr, err := manifestsRecord(ctx, store, dManifests) if err != nil { return fmt.Errorf("unable to create manifests file: %w", err) diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 92cfcd91..00000000 --- a/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} - - // Fields type to pass to `WithFields`, alias from `logrus`. 
- Fields = logrus.Fields -) - -const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - - // TextFormat represents the text logging format - TextFormat = "text" - - // JSONFormat represents the JSON logging format - JSONFormat = "json" -) - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - e := logger.WithContext(ctx) - return context.WithValue(ctx, loggerKey{}, e) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L.WithContext(ctx) - } - - return logger.(*logrus.Entry) -} diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go new file mode 100644 index 00000000..9e9e8b49 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context_deprecated.go @@ -0,0 +1,149 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/containerd/log" +) + +// G is a shorthand for [GetLogger]. +// +// Deprecated: use [log.G]. +var G = log.G + +// L is an alias for the standard logger. +// +// Deprecated: use [log.L]. +var L = log.L + +// Fields type to pass to "WithFields". +// +// Deprecated: use [log.Fields]. +type Fields = log.Fields + +// Entry is a logging entry. +// +// Deprecated: use [log.Entry]. +type Entry = log.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +// +// Deprecated: use [log.RFC3339NanoFixed]. +const RFC3339NanoFixed = log.RFC3339NanoFixed + +// Level is a logging level. +// +// Deprecated: use [log.Level]. +type Level = log.Level + +// Supported log levels. +const ( + // TraceLevel level. + // + // Deprecated: use [log.TraceLevel]. + TraceLevel Level = log.TraceLevel + + // DebugLevel level. + // + // Deprecated: use [log.DebugLevel]. + DebugLevel Level = log.DebugLevel + + // InfoLevel level. + // + // Deprecated: use [log.InfoLevel]. + InfoLevel Level = log.InfoLevel + + // WarnLevel level. + // + // Deprecated: use [log.WarnLevel]. + WarnLevel Level = log.WarnLevel + + // ErrorLevel level + // + // Deprecated: use [log.ErrorLevel]. + ErrorLevel Level = log.ErrorLevel + + // FatalLevel level. + // + // Deprecated: use [log.FatalLevel]. + FatalLevel Level = log.FatalLevel + + // PanicLevel level. + // + // Deprecated: use [log.PanicLevel]. + PanicLevel Level = log.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. 
+// +// Deprecated: use [log.SetLevel]. +func SetLevel(level string) error { + return log.SetLevel(level) +} + +// GetLevel returns the current log level. +// +// Deprecated: use [log.GetLevel]. +func GetLevel() log.Level { + return log.GetLevel() +} + +// OutputFormat specifies a log output format. +// +// Deprecated: use [log.OutputFormat]. +type OutputFormat = log.OutputFormat + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + // + // Deprecated: use [log.TextFormat]. + TextFormat log.OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + // + // Deprecated: use [log.JSONFormat]. + JSONFormat log.OutputFormat = "json" +) + +// SetFormat sets the log output format. +// +// Deprecated: use [log.SetFormat]. +func SetFormat(format OutputFormat) error { + return log.SetFormat(format) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +// +// Deprecated: use [log.WithLogger]. +func WithLogger(ctx context.Context, logger *log.Entry) context.Context { + return log.WithLogger(ctx, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +// +// Deprecated: use [log.GetLogger]. +func GetLogger(ctx context.Context) *log.Entry { + return log.GetLogger(ctx) +} diff --git a/vendor/github.com/containerd/containerd/mount/losetup_linux.go b/vendor/github.com/containerd/containerd/mount/losetup_linux.go index 567ebb4e..e3647e95 100644 --- a/vendor/github.com/containerd/containerd/mount/losetup_linux.go +++ b/vendor/github.com/containerd/containerd/mount/losetup_linux.go @@ -89,6 +89,12 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re return nil, fmt.Errorf("could not set loop fd for device: %s: %w", loopDev, err) } + defer func() { + if retErr != nil { + _ = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_CLR_FD, 0) + } + }() + // 3. Set Info info := unix.LoopInfo64{} copy(info.File_name[:], backingFile) @@ -100,27 +106,20 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re info.Flags |= unix.LO_FLAGS_AUTOCLEAR } - if param.Direct { - info.Flags |= unix.LO_FLAGS_DIRECT_IO - } - err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info) - if err == nil { - return loop, nil + if err != nil { + return nil, fmt.Errorf("failed to set loop device info: %w", err) } + // 4. Set Direct IO if param.Direct { - // Retry w/o direct IO flag in case kernel does not support it. The downside is that - // it will suffer from double cache problem. 
- info.Flags &= ^(uint32(unix.LO_FLAGS_DIRECT_IO)) - err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info) - if err == nil { - return loop, nil + err = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_DIRECT_IO, 1) + if err != nil { + return nil, fmt.Errorf("failed to setup loop with direct: %w", err) } } - _ = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_CLR_FD, 0) - return nil, fmt.Errorf("failed to set loop device info: %v", err) + return loop, nil } // setupLoop looks for (and possibly creates) a free loop device, and diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go index 21dd0f90..ae7520f9 100644 --- a/vendor/github.com/containerd/containerd/mount/mount.go +++ b/vendor/github.com/containerd/containerd/mount/mount.go @@ -18,6 +18,7 @@ package mount import ( "fmt" + "strings" "github.com/containerd/continuity/fs" ) @@ -67,6 +68,17 @@ func UnmountMounts(mounts []Mount, target string, flags int) error { return nil } +// ReadOnly returns a boolean value indicating whether this mount has the "ro" +// option set. +func (m *Mount) ReadOnly() bool { + for _, option := range m.Options { + if option == "ro" { + return true + } + } + return false +} + // Mount to the provided target path. func (m *Mount) Mount(target string) error { target, err := fs.RootPath(target, m.Target) @@ -75,3 +87,46 @@ func (m *Mount) Mount(target string) error { } return m.mount(target) } + +// readonlyMounts modifies the received mount options +// to make them readonly +func readonlyMounts(mounts []Mount) []Mount { + for i, m := range mounts { + if m.Type == "overlay" { + mounts[i].Options = readonlyOverlay(m.Options) + continue + } + opts := make([]string, 0, len(m.Options)) + for _, opt := range m.Options { + if opt != "rw" && opt != "ro" { // skip `ro` too so we don't append it twice + opts = append(opts, opt) + } + } + opts = append(opts, "ro") + mounts[i].Options = opts + } + return mounts +} + +// readonlyOverlay takes mount options for overlay mounts and makes them readonly by +// removing workdir and upperdir (and appending the upperdir layer to lowerdir) - see: +// https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#multiple-lower-layers +func readonlyOverlay(opt []string) []string { + out := make([]string, 0, len(opt)) + upper := "" + for _, o := range opt { + if strings.HasPrefix(o, "upperdir=") { + upper = strings.TrimPrefix(o, "upperdir=") + } else if !strings.HasPrefix(o, "workdir=") { + out = append(out, o) + } + } + if upper != "" { + for i, o := range out { + if strings.HasPrefix(o, "lowerdir=") { + out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") + } + } + } + return out +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go index 4183333d..b58e249d 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go @@ -63,9 +63,6 @@ func (m *Mount) mount(target string) (err error) { } flags, data, losetup := parseMountOptions(options) - if len(data) > pagesize { - return errors.New("mount options is too long") - } // propagation types. const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE @@ -73,15 +70,27 @@ func (m *Mount) mount(target string) (err error) { // Ensure propagation type change flags aren't included in other calls. 
oflags := flags &^ ptypes + var loopParams LoopParams + if losetup { + loopParams = LoopParams{ + Readonly: oflags&unix.MS_RDONLY == unix.MS_RDONLY, + Autoclear: true, + } + loopParams.Direct, data = hasDirectIO(data) + } + + dataInStr := strings.Join(data, ",") + if len(dataInStr) > pagesize { + return errors.New("mount options is too long") + } + // In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077). - if flags&unix.MS_REMOUNT == 0 || data != "" { + if flags&unix.MS_REMOUNT == 0 || dataInStr != "" { // Initial call applying all non-propagation flags for mount // or remount with changed data source := m.Source if losetup { - loFile, err := setupLoop(m.Source, LoopParams{ - Readonly: oflags&unix.MS_RDONLY == unix.MS_RDONLY, - Autoclear: true}) + loFile, err := setupLoop(m.Source, loopParams) if err != nil { return err } @@ -90,7 +99,7 @@ func (m *Mount) mount(target string) (err error) { // Mount the loop device instead source = loFile.Name() } - if err := mountAt(chdir, source, target, m.Type, uintptr(oflags), data); err != nil { + if err := mountAt(chdir, source, target, m.Type, uintptr(oflags), dataInStr); err != nil { return err } } @@ -199,7 +208,7 @@ func UnmountAll(mount string, flags int) error { // parseMountOptions takes fstab style mount options and parses them for // use with a standard mount() syscall -func parseMountOptions(options []string) (int, string, bool) { +func parseMountOptions(options []string) (int, []string, bool) { var ( flag int losetup bool @@ -252,7 +261,16 @@ func parseMountOptions(options []string) (int, string, bool) { data = append(data, o) } } - return flag, strings.Join(data, ","), losetup + return flag, data, losetup +} + +func hasDirectIO(opts []string) (bool, []string) { + for idx, opt := range opts { + if opt == "direct-io" { + return true, append(opts[:idx], opts[idx+1:]...) + } + } + return false, opts } // compactLowerdirOption updates overlay lowdir option and returns the common diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go index b73fe364..7c24fa60 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_windows.go +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -17,17 +17,29 @@ package mount import ( + "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" "strings" + "github.com/Microsoft/go-winio/pkg/bindfilter" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" + "golang.org/x/sys/windows" +) + +const sourceStreamName = "containerd.io-source" + +var ( + // ErrNotImplementOnWindows is returned when an action is not implemented for windows + ErrNotImplementOnWindows = errors.New("not implemented under windows") ) // Mount to the provided target. 
-func (m *Mount) mount(target string) error { +func (m *Mount) mount(target string) (retErr error) { if m.Type != "windows-layer" { return fmt.Errorf("invalid windows mount type: '%s'", m.Type) } @@ -43,25 +55,60 @@ func (m *Mount) mount(target string) error { HomeDir: home, } - if err = hcsshim.ActivateLayer(di, layerID); err != nil { + if err := hcsshim.ActivateLayer(di, layerID); err != nil { return fmt.Errorf("failed to activate layer %s: %w", m.Source, err) } + defer func() { + if retErr != nil { + if layerErr := hcsshim.DeactivateLayer(di, layerID); layerErr != nil { + log.G(context.TODO()).WithError(layerErr).Error("failed to deactivate layer during mount failure cleanup") + } + } + }() - if err = hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { + if err := hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { return fmt.Errorf("failed to prepare layer %s: %w", m.Source, err) } - // We can link the layer mount path to the given target. It is an UNC path, and it needs - // a trailing backslash. - mountPath, err := hcsshim.GetLayerMountPath(di, layerID) + defer func() { + if retErr != nil { + if layerErr := hcsshim.UnprepareLayer(di, layerID); layerErr != nil { + log.G(context.TODO()).WithError(layerErr).Error("failed to unprepare layer during mount failure cleanup") + } + } + }() + + volume, err := hcsshim.GetLayerMountPath(di, layerID) if err != nil { - return fmt.Errorf("failed to get layer mount path for %s: %w", m.Source, err) + return fmt.Errorf("failed to get volume path for layer %s: %w", m.Source, err) + } + + if len(parentLayerPaths) == 0 { + // this is a base layer. It gets mounted without going through WCIFS. We need to mount the Files + // folder, not the actual source, or the client may inadvertently remove metadata files. + volume = filepath.Join(volume, "Files") + if _, err := os.Stat(volume); err != nil { + return fmt.Errorf("no Files folder in layer %s", layerID) + } } - mountPath = mountPath + `\` + if err := bindfilter.ApplyFileBinding(target, volume, m.ReadOnly()); err != nil { + return fmt.Errorf("failed to set volume mount path for layer %s: %w", m.Source, err) + } + defer func() { + if retErr != nil { + if bindErr := bindfilter.RemoveFileBinding(target); bindErr != nil { + log.G(context.TODO()).WithError(bindErr).Error("failed to remove binding during mount failure cleanup") + } + } + }() - if err = os.Symlink(mountPath, target); err != nil { - return fmt.Errorf("failed to link mount to target %s: %w", target, err) + // Add an Alternate Data Stream to record the layer source. + // See https://docs.microsoft.com/en-au/archive/blogs/askcore/alternate-data-streams-in-ntfs + // for details on Alternate Data Streams. 
+ if err := os.WriteFile(filepath.Clean(target)+":"+sourceStreamName, []byte(m.Source), 0666); err != nil { + return fmt.Errorf("failed to record source for layer %s: %w", m.Source, err) } + return nil } @@ -85,25 +132,55 @@ func (m *Mount) GetParentPaths() ([]string, error) { // Unmount the mount at the provided path func Unmount(mount string, flags int) error { - var ( - home, layerID = filepath.Split(mount) - di = hcsshim.DriverInfo{ - HomeDir: home, + mount = filepath.Clean(mount) + adsFile := mount + ":" + sourceStreamName + var layerPath string + + if _, err := os.Lstat(adsFile); err == nil { + layerPathb, err := os.ReadFile(mount + ":" + sourceStreamName) + if err != nil { + return fmt.Errorf("failed to retrieve source for layer %s: %w", mount, err) } - ) - - if err := hcsshim.UnprepareLayer(di, layerID); err != nil { - return fmt.Errorf("failed to unprepare layer %s: %w", mount, err) + layerPath = string(layerPathb) } - if err := hcsshim.DeactivateLayer(di, layerID); err != nil { - return fmt.Errorf("failed to deactivate layer %s: %w", mount, err) + + if err := bindfilter.RemoveFileBinding(mount); err != nil { + if errors.Is(err, windows.ERROR_INVALID_PARAMETER) || errors.Is(err, windows.ERROR_NOT_FOUND) { + // not a mount point + return nil + } + return fmt.Errorf("removing mount: %w", err) } + if layerPath != "" { + var ( + home, layerID = filepath.Split(layerPath) + di = hcsshim.DriverInfo{ + HomeDir: home, + } + ) + + if err := hcsshim.UnprepareLayer(di, layerID); err != nil { + return fmt.Errorf("failed to unprepare layer %s: %w", mount, err) + } + + if err := hcsshim.DeactivateLayer(di, layerID); err != nil { + return fmt.Errorf("failed to deactivate layer %s: %w", mount, err) + } + } return nil } // UnmountAll unmounts from the provided path func UnmountAll(mount string, flags int) error { + if mount == "" { + // This isn't an error, per the EINVAL handling in the Linux version + return nil + } + if _, err := os.Stat(mount); os.IsNotExist(err) { + return nil + } + return Unmount(mount, flags) } diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go index 349c2404..83143521 100644 --- a/vendor/github.com/containerd/containerd/mount/temp.go +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -67,6 +67,13 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro return nil } +// WithReadonlyTempMount mounts the provided mounts to a temp dir as readonly, +// and pass the temp dir to f. The mounts are valid during the call to the f. +// Finally we will unmount and remove the temp dir regardless of the result of f. +func WithReadonlyTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) { + return WithTempMount(ctx, readonlyMounts(mounts), f) +} + func getTempDir() string { if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" { return xdg diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index bee3b44d..9c3d7d3b 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -18,6 +18,8 @@ package oci import ( "context" + "encoding/json" + "os" "path/filepath" "runtime" @@ -43,6 +45,22 @@ var ( // to be created without the "issues" with go vendoring and package imports type Spec = specs.Spec +const ConfigFilename = "config.json" + +// ReadSpec deserializes JSON into an OCI runtime Spec from a given path. 
+func ReadSpec(path string) (*Spec, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + var s Spec + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + // GenerateSpec will generate a default spec from the provided image // for use as a containerd container func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*Spec, error) { diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index 00942995..f3882898 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -607,7 +607,9 @@ func WithUser(userstr string) SpecOpts { // The `Username` field on the runtime spec is marked by Platform as only for Windows, and in this case it // *is* being set on a Windows host at least, but will be used as a temporary holding spot until the guest // can use the string to perform these same operations to grab the uid:gid inside. - if s.Windows != nil && s.Linux != nil { + // + // Mounts are not supported on Darwin, so using the same workaround. + if (s.Windows != nil && s.Linux != nil) || runtime.GOOS == "darwin" { s.Process.User.Username = userstr return nil } @@ -681,8 +683,11 @@ func WithUser(userstr string) SpecOpts { return err } - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, f) + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, f) default: return fmt.Errorf("invalid USER value %s", userstr) } @@ -742,8 +747,11 @@ func WithUserID(uid uint32) SpecOpts { return err } - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, setUser) + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setUser) } } @@ -787,8 +795,11 @@ func WithUsername(username string) SpecOpts { return err } - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, setUser) + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setUser) } else if s.Windows != nil { s.Process.User.Username = username } else { @@ -866,8 +877,11 @@ func WithAdditionalGIDs(userstr string) SpecOpts { return err } - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, setAdditionalGids) + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. 
+ return mount.WithReadonlyTempMount(ctx, mounts, setAdditionalGids) } } @@ -928,8 +942,11 @@ func WithAppendAdditionalGroups(groups ...string) SpecOpts { return err } - mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, setAdditionalGids) + // Use a read-only mount when trying to get user/group information + // from the container's rootfs. Since the option does read operation + // only, we append ReadOnly mount option to prevent the Linux kernel + // from syncing whole filesystem in umount syscall. + return mount.WithReadonlyTempMount(ctx, mounts, setAdditionalGids) } } @@ -1424,24 +1441,6 @@ func WithDevShmSize(kb int64) SpecOpts { } } -// tryReadonlyMounts is used by the options which are trying to get user/group -// information from container's rootfs. Since the option does read operation -// only, this helper will append ReadOnly mount option to prevent linux kernel -// from syncing whole filesystem in umount syscall. -// -// TODO(fuweid): -// -// Currently, it only works for overlayfs. I think we can apply it to other -// kinds of filesystem. Maybe we can return `ro` option by `snapshotter.Mount` -// API, when the caller passes that experimental annotation -// `containerd.io/snapshot/readonly.mount` something like that. -func tryReadonlyMounts(mounts []mount.Mount) []mount.Mount { - if len(mounts) == 1 && mounts[0].Type == "overlay" { - mounts[0].Options = append(mounts[0].Options, "ro") - } - return mounts -} - // WithWindowsDevice adds a device exposed to a Windows (WCOW or LCOW) Container func WithWindowsDevice(idType, id string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go index 7352fda8..c859cf42 100644 --- a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go +++ b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go @@ -164,7 +164,7 @@ func ReceiveStream(ctx context.Context, stream streaming.Stream) io.Reader { } any, err := stream.Recv() if err != nil { - if errors.Is(err, io.EOF) { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { err = nil } else { err = fmt.Errorf("received failed: %w", err) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index fd575651..d10fa901 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" + "github.com/Microsoft/hcsshim/osversion" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" ) @@ -50,15 +51,36 @@ func (m windowsmatcher) Match(p specs.Platform) bool { match := m.defaultMatcher.Match(p) if match && m.OS == "windows" { - if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) { + // HPC containers do not have OS version filled + if p.OSVersion == "" { return true } - return p.OSVersion == "" + + hostOsVersion := GetOsVersion(m.osVersionPrefix) + ctrOsVersion := GetOsVersion(p.OSVersion) + return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion) } return match } +func GetOsVersion(osVersionPrefix string) osversion.OSVersion { + parts := strings.Split(osVersionPrefix, ".") + if len(parts) < 3 { + return osversion.OSVersion{} + } + + majorVersion, _ := 
strconv.Atoi(parts[0]) + minorVersion, _ := strconv.Atoi(parts[1]) + buildNumber, _ := strconv.Atoi(parts[2]) + + return osversion.OSVersion{ + MajorVersion: uint8(majorVersion), + MinorVersion: uint8(minorVersion), + Build: uint16(buildNumber), + } +} + // Less sorts matched platforms in front of other platforms. // For matched platforms, it puts platforms with larger revision // number in front. diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 8dcde7db..56613b07 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -196,6 +196,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = cpuVariant() } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil } @@ -218,6 +222,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = "" } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil case 3: // we have a fully specified variant, this is rare @@ -227,6 +235,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = "v8" } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/containerd/platforms/platforms_other.go index 03f4dcd9..59beeb3d 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms_other.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms_other.go @@ -28,3 +28,7 @@ func newDefaultMatcher(platform specs.Platform) Matcher { Platform: Normalize(platform), } } + +func GetWindowsOsVersion() string { + return "" +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go index 950e2a2d..733d18dd 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go @@ -17,7 +17,10 @@ package platforms import ( + "fmt" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" ) // NewMatcher returns a Windows matcher that will match on osVersionPrefix if @@ -32,3 +35,8 @@ func newDefaultMatcher(platform specs.Platform) Matcher { }, } } + +func GetWindowsOsVersion() string { + major, minor, build := windows.RtlGetNtVersionNumbers() + return fmt.Sprintf("%d.%d.%d", major, minor, build) +} diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go index dcb533c8..cf916789 100644 --- a/vendor/github.com/containerd/containerd/plugin/context.go +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -28,12 +28,13 @@ import ( // InitContext is used for plugin initialization type InitContext struct { - Context context.Context - Root string - State string - Config interface{} - Address string - TTRPCAddress string + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + RegisterReadiness func() func() // deprecated: will be removed in 2.0, use plugin.EventType Events *exchange.Exchange diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 5d96c8cc..c35278aa 100644 --- 
a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -34,7 +34,8 @@ import ( ) const ( - pullSpanPrefix = "pull" + pullSpanPrefix = "pull" + convertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" ) // Pull downloads the provided content into containerd's content store @@ -189,9 +190,10 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim var ( handler images.Handler - isConvertible bool - converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error) - limiter *semaphore.Weighted + isConvertible bool + originalSchema1Digest string + converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error) + limiter *semaphore.Weighted ) if desc.MediaType == images.MediaTypeDockerSchema1Manifest && rCtx.ConvertSchema1 { @@ -204,6 +206,8 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim converterFunc = func(ctx context.Context, _ ocispec.Descriptor) (ocispec.Descriptor, error) { return schema1Converter.Convert(ctx) } + + originalSchema1Digest = desc.Digest.String() } else { // Get all the children for a descriptor childrenHandler := images.ChildrenHandler(store) @@ -270,6 +274,13 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim } } + if originalSchema1Digest != "" { + if rCtx.Labels == nil { + rCtx.Labels = make(map[string]string) + } + rCtx.Labels[convertedDockerSchema1LabelKey] = originalSchema1Digest + } + return images.Image{ Name: name, Target: desc, diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 8fc82314..9b3663cd 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -186,15 +186,15 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return err } - if username != "" && secret != "" { - common := auth.TokenOptions{ - Username: username, - Secret: secret, - } - - a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) - return nil + if username == "" || secret == "" { + return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization) } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{ + Username: username, + Secret: secret, + }) + return nil } } return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented) diff --git a/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go b/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go index aa8ea959..6c719ded 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go @@ -101,12 +101,22 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos hosts[len(hosts)-1].host = "registry-1.docker.io" } else if docker.IsLocalhost(host) { hosts[len(hosts)-1].host = host - if options.DefaultScheme == "" || options.DefaultScheme == "http" { - hosts[len(hosts)-1].scheme = "http" + if options.DefaultScheme == "" { + _, port, _ := net.SplitHostPort(host) + if port == "" || port == "443" { + // If port is default or 443, only use https + hosts[len(hosts)-1].scheme = "https" + } else { + // HTTP fallback logic will be used when protocol is ambiguous + 
hosts[len(hosts)-1].scheme = "http" + } - // Skipping TLS verification for localhost - var skipVerify = true - hosts[len(hosts)-1].skipVerify = &skipVerify + // When port is 80, protocol is not ambiguous + if port != "80" { + // Skipping TLS verification for localhost + var skipVerify = true + hosts[len(hosts)-1].skipVerify = &skipVerify + } } else { hosts[len(hosts)-1].scheme = options.DefaultScheme } @@ -122,8 +132,13 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos hosts[len(hosts)-1].capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush } + // tlsConfigured indicates that TLS was configured and HTTP endpoints should + // attempt to use the TLS configuration before falling back to HTTP + var tlsConfigured bool + var defaultTLSConfig *tls.Config if options.DefaultTLS != nil { + tlsConfigured = true defaultTLSConfig = options.DefaultTLS } else { defaultTLSConfig = &tls.Config{} @@ -161,14 +176,11 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos rhosts := make([]docker.RegistryHost, len(hosts)) for i, host := range hosts { - - rhosts[i].Scheme = host.scheme - rhosts[i].Host = host.host - rhosts[i].Path = host.path - rhosts[i].Capabilities = host.capabilities - rhosts[i].Header = host.header + // Allow setting for each host as well + explicitTLS := tlsConfigured if host.caCerts != nil || host.clientPairs != nil || host.skipVerify != nil { + explicitTLS = true tr := defaultTransport.Clone() tlsConfig := tr.TLSClientConfig if host.skipVerify != nil { @@ -232,6 +244,27 @@ func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHos rhosts[i].Client = client rhosts[i].Authorizer = authorizer } + + // When TLS has been configured for the operation or host and + // the protocol from the port number is ambiguous, use the + // docker.HTTPFallback roundtripper to catch TLS errors and re-attempt the + // request as http. This allows preference for https when configured but + // also catches TLS errors early enough in the request to avoid sending + // the request twice or consuming the request body. 
+ if host.scheme == "http" && explicitTLS { + _, port, _ := net.SplitHostPort(host.host) + if port != "" && port != "80" { + log.G(ctx).WithField("host", host.host).Info("host will try HTTPS first since it is configured for HTTP with a TLS configuration, consider changing host to HTTPS or removing unused TLS configuration") + host.scheme = "https" + rhosts[i].Client.Transport = docker.HTTPFallback{RoundTripper: rhosts[i].Client.Transport} + } + } + + rhosts[i].Scheme = host.scheme + rhosts[i].Host = host.host + rhosts[i].Path = host.path + rhosts[i].Capabilities = host.capabilities + rhosts[i].Header = host.header } return rhosts, nil diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index ef6e8056..678e17e1 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -23,6 +23,7 @@ import ( "io" "net/http" "net/url" + "path" "strings" "sync" "time" @@ -137,6 +138,9 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str if exists { p.tracker.SetStatus(ref, Status{ Committed: true, + PushStatus: PushStatus{ + Exists: true, + }, Status: content.Status{ Ref: ref, Total: desc.Size, @@ -164,6 +168,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str // Start upload request req = p.request(host, http.MethodPost, "blobs", "uploads/") + mountedFrom := "" var resp *http.Response if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) @@ -180,11 +185,14 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str return nil, err } - if resp.StatusCode == http.StatusUnauthorized { + switch resp.StatusCode { + case http.StatusUnauthorized: log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) resp.Body.Close() resp = nil + case http.StatusCreated: + mountedFrom = path.Join(p.refspec.Hostname(), fromRepo) } } @@ -204,6 +212,9 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str case http.StatusCreated: p.tracker.SetStatus(ref, Status{ Committed: true, + PushStatus: PushStatus{ + MountedFrom: mountedFrom, + }, Status: content.Status{ Ref: ref, Total: desc.Size, @@ -238,13 +249,16 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str } if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { - lhost.Scheme = lurl.Scheme lhost.Host = lurl.Host - log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") - // Strip authorizer if change to host or scheme - lhost.Authorizer = nil + // Check if different than what was requested, accounting for fallback in the transport layer + requested := resp.Request.URL + if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme { + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed") + } } } q := lurl.Query() diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 48737413..c280e091 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ 
b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,6 +18,7 @@ package docker import ( "context" + "crypto/tls" "errors" "fmt" "io" @@ -98,25 +99,30 @@ type ResolverOptions struct { Tracker StatusTracker // Authorizer is used to authorize registry requests - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Authorizer Authorizer // Credentials provides username and secret given a host. // If username is empty but a secret is given, that secret // is interpreted as a long lived token. - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Credentials func(string) (string, string, error) // Host provides the hostname given a namespace. - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Host func(string) (string, error) // PlainHTTP specifies to use plain http and not https - // Deprecated: use Hosts + // + // Deprecated: use Hosts. PlainHTTP bool // Client is the http client to used when making registry requests - // Deprecated: use Hosts + // + // Deprecated: use Hosts. Client *http.Client } @@ -143,6 +149,9 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Headers == nil { options.Headers = make(http.Header) + } else { + // make a copy of the headers to avoid race due to concurrent map write + options.Headers = options.Headers.Clone() } if _, ok := options.Headers["User-Agent"]; !ok { options.Headers.Set("User-Agent", "containerd/"+version.Version) @@ -538,9 +547,10 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { if err != nil { return nil, err } - req.Header = http.Header{} // headers need to be copied to avoid concurrent map access - for k, v := range r.header { - req.Header[k] = v + if r.header == nil { + req.Header = http.Header{} + } else { + req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access } if r.body != nil { body, err := r.body() @@ -664,7 +674,7 @@ func requestFields(req *http.Request) log.Fields { } } - return log.Fields(fields) + return fields } func responseFields(resp *http.Response) log.Fields { @@ -682,7 +692,7 @@ func responseFields(resp *http.Response) log.Fields { } } - return log.Fields(fields) + return fields } // IsLocalhost checks if the registry host is local. @@ -698,3 +708,27 @@ func IsLocalhost(host string) bool { ip := net.ParseIP(host) return ip.IsLoopback() } + +// HTTPFallback is an http.RoundTripper which allows fallback from https to http +// for registry endpoints with configurations for both http and TLS, such as +// defaulted localhost endpoints. 
+type HTTPFallback struct { + http.RoundTripper +} + +func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := f.RoundTripper.RoundTrip(r) + var tlsErr tls.RecordHeaderError + if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" { + // server gave HTTP response to HTTPS client + plainHTTPUrl := *r.URL + plainHTTPUrl.Scheme = "http" + + plainHTTPRequest := *r + plainHTTPRequest.URL = &plainHTTPUrl + + return f.RoundTripper.RoundTrip(&plainHTTPRequest) + } + + return resp, err +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go index 1f7b278a..1a922772 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/status.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -36,6 +36,17 @@ type Status struct { // UploadUUID is used by the Docker registry to reference blob uploads UploadUUID string + + // PushStatus contains status related to push. + PushStatus +} + +type PushStatus struct { + // MountedFrom is the source content was cross-repo mounted from (empty if no cross-repo mount was performed). + MountedFrom string + + // Exists indicates whether content already exists in the repository and wasn't uploaded. + Exists bool } // StatusTracker to track status of operations diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 31de5551..0ff39179 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -204,8 +204,9 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // Base handlers can be provided which will be called before any push specific // handlers. // -// If the passed in content.Provider is also a content.Manager then this will -// also annotate the distribution sources in the manager. +// If the passed in content.Provider is also a content.InfoProvider (such as +// content.Manager) then this will also annotate the distribution sources using +// labels prefixed with "containerd.io/distribution.source". func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex @@ -234,7 +235,7 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) var handler images.Handler - if m, ok := store.(content.Manager); ok { + if m, ok := store.(content.InfoProvider); ok { annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m) handler = images.Handlers(annotateHandler, filterHandler, pushHandler) } else { @@ -344,14 +345,15 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) // annotateDistributionSourceHandler add distribution source label into // annotation of config or blob descriptor. 
-func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { +func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { children, err := f(ctx, desc) if err != nil { return nil, err } - // only add distribution source for the config or blob data descriptor + // Distribution source is only used for config or blob but may be inherited from + // a manifest or manifest list switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: @@ -359,12 +361,28 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Man return children, nil } + // parentInfo can be used to inherit info for non-existent blobs + var parentInfo *content.Info + for i := range children { child := children[i] - info, err := manager.Info(ctx, child.Digest) + info, err := provider.Info(ctx, child.Digest) if err != nil { - return nil, err + if !errdefs.IsNotFound(err) { + return nil, err + } + if parentInfo == nil { + pi, err := provider.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + parentInfo = &pi + } + // Blob may not exist locally, annotate with parent labels for cross repo + // mount or fetch. Parent sources may apply to all children since most + // registries enforce that children exist before the manifests. + info = *parentInfo } for k, v := range info.Labels { diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index be35611e..9667a1cf 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -140,6 +140,11 @@ type TaskInfo struct { RootFS []mount.Mount // Options hold runtime specific settings for task creation Options interface{} + // RuntimePath is an absolute path that can be used to overwrite path + // to a shim runtime binary. + RuntimePath string + + // runtime is the runtime name for the container, and cannot be changed. runtime string } @@ -321,7 +326,16 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat return nil, fmt.Errorf("task must be stopped before deletion: %s: %w", status.Status, errdefs.ErrFailedPrecondition) } if t.io != nil { - t.io.Close() + // io.Wait locks for restored tasks on Windows unless we call + // io.Close first (https://github.com/containerd/containerd/issues/5621) + // in other cases, preserve the contract and let IO finish before closing + if t.client.runtime == fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "windows") { + t.io.Close() + } + // io.Cancel is used to cancel the io goroutine while it is in + // fifo-opening state. It does not stop the pipes since these + // should be closed on the shim's side, otherwise we might lose + // data from the container! t.io.Cancel() t.io.Wait() } diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go index e372ca44..da269016 100644 --- a/vendor/github.com/containerd/containerd/task_opts.go +++ b/vendor/github.com/containerd/containerd/task_opts.go @@ -49,7 +49,7 @@ func WithRootFS(mounts []mount.Mount) NewTaskOpts { // instead of resolving it from runtime name. 
func WithRuntimePath(absRuntimePath string) NewTaskOpts { return func(ctx context.Context, client *Client, info *TaskInfo) error { - info.runtime = absRuntimePath + info.RuntimePath = absRuntimePath return nil } } diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index c9f9dd55..0f9044c4 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.0+unknown" + Version = "1.7.8+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go index 6982a761..af3abdd4 100644 --- a/vendor/github.com/containerd/continuity/fs/copy.go +++ b/vendor/github.com/containerd/continuity/fs/copy.go @@ -18,21 +18,13 @@ package fs import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" - "sync" "github.com/sirupsen/logrus" ) -var bufferPool = &sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32*1024) - return &buffer - }, -} - // XAttrErrorHandler transform a non-nil xattr error. // Return nil to ignore an error. // xattrKey can be empty for listxattr operation. @@ -111,7 +103,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er } } - fis, err := ioutil.ReadDir(src) + entries, err := os.ReadDir(src) if err != nil { return fmt.Errorf("failed to read %s: %w", src, err) } @@ -124,18 +116,23 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er return fmt.Errorf("failed to copy xattrs: %w", err) } - for _, fi := range fis { - source := filepath.Join(src, fi.Name()) - target := filepath.Join(dst, fi.Name()) + for _, entry := range entries { + source := filepath.Join(src, entry.Name()) + target := filepath.Join(dst, entry.Name()) + + fileInfo, err := entry.Info() + if err != nil { + return fmt.Errorf("failed to get file info for %s: %w", entry.Name(), err) + } switch { - case fi.IsDir(): + case entry.IsDir(): if err := copyDirectory(target, source, inodes, o); err != nil { return err } continue - case (fi.Mode() & os.ModeType) == 0: - link, err := getLinkSource(target, fi, inodes) + case (fileInfo.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fileInfo, inodes) if err != nil { return fmt.Errorf("failed to get hardlink: %w", err) } @@ -146,7 +143,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er } else if err := CopyFile(target, source); err != nil { return fmt.Errorf("failed to copy files: %w", err) } - case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + case (fileInfo.Mode() & os.ModeSymlink) == os.ModeSymlink: link, err := os.Readlink(source) if err != nil { return fmt.Errorf("failed to read link: %s: %w", source, err) @@ -154,18 +151,18 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er if err := os.Symlink(link, target); err != nil { return fmt.Errorf("failed to create symlink: %s: %w", target, err) } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice, - (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe, - (fi.Mode() & os.ModeSocket) == os.ModeSocket: - if err := copyIrregular(target, fi); err != nil { + case (fileInfo.Mode() & os.ModeDevice) == os.ModeDevice, + 
(fileInfo.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe, + (fileInfo.Mode() & os.ModeSocket) == os.ModeSocket: + if err := copyIrregular(target, fileInfo); err != nil { return fmt.Errorf("failed to create irregular file: %w", err) } default: - logrus.Warnf("unsupported mode: %s: %s", source, fi.Mode()) + logrus.Warnf("unsupported mode: %s: %s", source, fileInfo.Mode()) continue } - if err := copyFileInfo(fi, source, target); err != nil { + if err := copyFileInfo(fileInfo, source, target); err != nil { return fmt.Errorf("failed to copy file info: %w", err) } @@ -180,6 +177,10 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er // CopyFile copies the source file to the target. // The most efficient means of copying is used for the platform. func CopyFile(target, source string) error { + return copyFile(target, source) +} + +func openAndCopyFile(target, source string) error { src, err := os.Open(source) if err != nil { return fmt.Errorf("failed to open source %s: %w", source, err) @@ -191,5 +192,6 @@ func CopyFile(target, source string) error { } defer tgt.Close() - return copyFileContent(tgt, src) + _, err = io.Copy(tgt, src) + return err } diff --git a/vendor/github.com/containerd/continuity/fs/copy_darwin.go b/vendor/github.com/containerd/continuity/fs/copy_darwin.go new file mode 100644 index 00000000..97fc2e8e --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_darwin.go @@ -0,0 +1,35 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "errors" + "fmt" + + "golang.org/x/sys/unix" +) + +func copyFile(target, source string) error { + if err := unix.Clonefile(source, target, unix.CLONE_NOFOLLOW); err != nil { + if !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.EXDEV) { + return fmt.Errorf("clonefile failed: %w", err) + } + + return openAndCopyFile(target, source) + } + return nil +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go index 1906e5e0..48ac3fbd 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_linux.go +++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go @@ -18,7 +18,6 @@ package fs import ( "fmt" - "io" "os" "syscall" @@ -62,51 +61,6 @@ func copyFileInfo(fi os.FileInfo, src, name string) error { return nil } -const maxSSizeT = int64(^uint(0) >> 1) - -func copyFileContent(dst, src *os.File) error { - st, err := src.Stat() - if err != nil { - return fmt.Errorf("unable to stat source: %w", err) - } - - size := st.Size() - first := true - srcFd := int(src.Fd()) - dstFd := int(dst.Fd()) - - for size > 0 { - // Ensure that we are never trying to copy more than SSIZE_MAX at a - // time and at the same time avoids overflows when the file is larger - // than 4GB on 32-bit systems. 
- var copySize int - if size > maxSSizeT { - copySize = int(maxSSizeT) - } else { - copySize = int(size) - } - n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) - if err != nil { - if (err != unix.ENOSYS && err != unix.EXDEV) || !first { - return fmt.Errorf("copy file range failed: %w", err) - } - - buf := bufferPool.Get().(*[]byte) - _, err = io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - if err != nil { - return fmt.Errorf("userspace copy failed: %w", err) - } - return nil - } - - first = false - size -= int64(n) - } - - return nil -} - func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { xattrKeys, err := sysx.LListxattr(src) if err != nil { diff --git a/vendor/github.com/containerd/continuity/fs/copy_nondarwin.go b/vendor/github.com/containerd/continuity/fs/copy_nondarwin.go new file mode 100644 index 00000000..275b64c0 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_nondarwin.go @@ -0,0 +1,22 @@ +//go:build !darwin +// +build !darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +var copyFile = openAndCopyFile diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go index 0e68ba9e..2e25914d 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_unix.go +++ b/vendor/github.com/containerd/continuity/fs/copy_unix.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || openbsd || netbsd || solaris -// +build darwin freebsd openbsd netbsd solaris +//go:build darwin || freebsd || openbsd || netbsd || dragonfly || solaris +// +build darwin freebsd openbsd netbsd dragonfly solaris /* Copyright The containerd Authors. 
@@ -21,8 +21,8 @@ package fs import ( "fmt" - "io" "os" + "runtime" "syscall" "github.com/containerd/continuity/sysx" @@ -60,17 +60,13 @@ func copyFileInfo(fi os.FileInfo, src, name string) error { return nil } -func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - - return err -} - func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { xattrKeys, err := sysx.LListxattr(src) if err != nil { + if os.IsPermission(err) && runtime.GOOS == "darwin" { + // On darwin, character devices do not permit listing xattrs + return nil + } e := fmt.Errorf("failed to list xattrs on %s: %w", src, err) if errorHandler != nil { e = errorHandler(dst, src, "", e) diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go index 4dad9441..1fad4c3a 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_windows.go +++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -19,7 +19,6 @@ package fs import ( "errors" "fmt" - "io" "os" winio "github.com/Microsoft/go-winio" @@ -49,7 +48,6 @@ func copyFileInfo(fi os.FileInfo, src, name string) error { secInfo, err := windows.GetNamedSecurityInfo( src, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) - if err != nil { return err } @@ -68,19 +66,11 @@ func copyFileInfo(fi os.FileInfo, src, name string) error { name, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION, sid, nil, dacl, nil); err != nil { - return err } return nil } -func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - return err -} - func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAttrErrorHandler) error { return nil } diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go index 3cd4eee6..d2c3c568 100644 --- a/vendor/github.com/containerd/continuity/fs/diff.go +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -80,12 +80,13 @@ type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error // // The change callback is called by the order of path names and // should be appliable in that order. -// Due to this apply ordering, the following is true -// - Removed directory trees only create a single change for the root -// directory removed. Remaining changes are implied. -// - A directory which is modified to become a file will not have -// delete entries for sub-path items, their removal is implied -// by the removal of the parent directory. +// +// Due to this apply ordering, the following is true +// - Removed directory trees only create a single change for the root +// directory removed. Remaining changes are implied. +// - A directory which is modified to become a file will not have +// delete entries for sub-path items, their removal is implied +// by the removal of the parent directory. // // Opaque directories will not be treated specially and each file // removed from the base directory will show up as a removal. 
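For orientation on the remotes/docker hunks above: the newly exported docker.HTTPFallback round tripper retries a request over plain HTTP when the HTTPS attempt fails because the peer answered the TLS handshake with a plaintext HTTP response, which is what lets ConfigureHosts prefer https for localhost hosts with ambiguous ports. A minimal sketch of that behaviour, assuming a throwaway httptest server standing in for a plain-HTTP localhost registry (the test server and URL rewriting here are illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Plain-HTTP server standing in for an http-only localhost registry endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	defer srv.Close()

	client := &http.Client{
		// Wrap the transport so an https:// request that reaches a plain-HTTP
		// endpoint is retried once over http:// (see HTTPFallback.RoundTrip in
		// the resolver.go hunk above).
		Transport: docker.HTTPFallback{RoundTripper: http.DefaultTransport},
	}

	// Force an https scheme against the http-only server. The handshake fails
	// with a tls.RecordHeaderError whose record header starts with "HTTP/",
	// which is the condition HTTPFallback uses to re-issue the request as http.
	httpsURL := strings.Replace(srv.URL, "http://", "https://", 1)
	resp, err := client.Get(httpsURL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expected: 200 OK, served via the http fallback
}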
diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go index a8eab1db..9f55e798 100644 --- a/vendor/github.com/containerd/continuity/fs/dtype_linux.go +++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -21,14 +21,13 @@ package fs import ( "fmt" - "io/ioutil" "os" "syscall" "unsafe" ) func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) + children, err := os.ReadDir(path) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go index bf33c42d..51a08a1d 100644 --- a/vendor/github.com/containerd/continuity/fs/du_unix.go +++ b/vendor/github.com/containerd/continuity/fs/du_unix.go @@ -28,10 +28,11 @@ import ( // blocksUnitSize is the unit used by `st_blocks` in `stat` in bytes. // See https://man7.org/linux/man-pages/man2/stat.2.html -// st_blocks -// This field indicates the number of blocks allocated to the -// file, in 512-byte units. (This may be smaller than -// st_size/512 when the file has holes.) +// +// st_blocks +// This field indicates the number of blocks allocated to the +// file, in 512-byte units. (This may be smaller than +// st_size/512 when the file has holes.) const blocksUnitSize = 512 type inode struct { @@ -48,7 +49,6 @@ func newInode(stat *syscall.Stat_t) inode { } func diskUsage(ctx context.Context, roots ...string) (Usage, error) { - var ( size int64 inodes = map[inode]struct{}{} // expensive! diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go index 08fb2833..ea721f82 100644 --- a/vendor/github.com/containerd/continuity/fs/du_windows.go +++ b/vendor/github.com/containerd/continuity/fs/du_windows.go @@ -26,9 +26,7 @@ import ( ) func diskUsage(ctx context.Context, roots ...string) (Usage, error) { - var ( - size int64 - ) + var size int64 // TODO(stevvooe): Support inodes (or equivalent) for windows. @@ -57,9 +55,7 @@ func diskUsage(ctx context.Context, roots ...string) (Usage, error) { } func diffUsage(ctx context.Context, a, b string) (Usage, error) { - var ( - size int64 - ) + var size int64 if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { if err != nil { diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go index 97313e2b..ec6e6a2f 100644 --- a/vendor/github.com/containerd/continuity/fs/path.go +++ b/vendor/github.com/containerd/continuity/fs/path.go @@ -25,9 +25,7 @@ import ( "path/filepath" ) -var ( - errTooManyLinks = errors.New("too many links") -) +var errTooManyLinks = errors.New("too many links") type currentPath struct { path string diff --git a/vendor/github.com/containerd/continuity/fs/stat_atim.go b/vendor/github.com/containerd/continuity/fs/stat_atim.go index 996b9c1a..ade7bec6 100644 --- a/vendor/github.com/containerd/continuity/fs/stat_atim.go +++ b/vendor/github.com/containerd/continuity/fs/stat_atim.go @@ -1,5 +1,5 @@ -//go:build linux || openbsd || solaris -// +build linux openbsd solaris +//go:build linux || openbsd || dragonfly || solaris +// +build linux openbsd dragonfly solaris /* Copyright The containerd Authors. 
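The hunks that follow vendor the new github.com/containerd/log module that containerd now depends on for context-scoped logging. As a quick orientation before the license text and context.go below, a minimal usage sketch of the API defined there; the "module" and "ref" field values are purely illustrative:

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// Configure the process-wide logger exposed as log.L.
	if err := log.SetLevel("debug"); err != nil {
		panic(err)
	}
	if err := log.SetFormat(log.TextFormat); err != nil {
		panic(err)
	}

	// Attach a field-tagged logger to a context...
	ctx := log.WithLogger(context.Background(), log.L.WithField("module", "example"))

	// ...and retrieve it later anywhere the context is available. log.G is
	// shorthand for log.GetLogger and falls back to the default logger when
	// the context carries none.
	log.G(ctx).WithFields(log.Fields{"ref": "docker.io/library/busybox:latest"}).Info("resolving")
}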
diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml new file mode 100644 index 00000000..a695775d --- /dev/null +++ b/vendor/github.com/containerd/log/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/log/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md new file mode 100644 index 00000000..00e08498 --- /dev/null +++ b/vendor/github.com/containerd/log/README.md @@ -0,0 +1,17 @@ +# log + +A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. + +This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. +In the future this package may be replaced with a common go logging interface. + +## Project details + +**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. + diff --git a/vendor/github.com/containerd/log/context.go b/vendor/github.com/containerd/log/context.go new file mode 100644 index 00000000..20153066 --- /dev/null +++ b/vendor/github.com/containerd/log/context.go @@ -0,0 +1,182 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package log provides types and functions related to logging, passing +// loggers through a context, and attaching context to the logger. +// +// # Transitional types +// +// This package contains various types that are aliases for types in [logrus]. +// These aliases are intended for transitioning away from hard-coding logrus +// as logging implementation. Consumers of this package are encouraged to use +// the type-aliases from this package instead of directly using their logrus +// equivalent. +// +// The intent is to replace these aliases with locally defined types and +// interfaces once all consumers are no longer directly importing logrus +// types. +// +// IMPORTANT: due to the transitional purpose of this package, it is not +// guaranteed for the full logrus API to be provided in the future. 
As +// outlined, these aliases are provided as a step to transition away from +// a specific implementation which, as a result, exposes the full logrus API. +// While no decisions have been made on the ultimate design and interface +// provided by this package, we do not expect carrying "less common" features. +package log + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" +) + +// G is a shorthand for [GetLogger]. +// +// We may want to define this locally to a package to get package tagged log +// messages. +var G = GetLogger + +// L is an alias for the standard logger. +var L = &Entry{ + Logger: logrus.StandardLogger(), + // Default is three fields plus a little extra room. + Data: make(Fields, 6), +} + +type loggerKey struct{} + +// Fields type to pass to "WithFields". +type Fields = map[string]any + +// Entry is a logging entry. It contains all the fields passed with +// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, +// Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +// +// Entry is a transitional type, and currently an alias for [logrus.Entry]. +type Entry = logrus.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// Level is a logging level. +type Level = logrus.Level + +// Supported log levels. +const ( + // TraceLevel level. Designates finer-grained informational events + // than [DebugLevel]. + TraceLevel Level = logrus.TraceLevel + + // DebugLevel level. Usually only enabled when debugging. Very verbose + // logging. + DebugLevel Level = logrus.DebugLevel + + // InfoLevel level. General operational entries about what's going on + // inside the application. + InfoLevel Level = logrus.InfoLevel + + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel Level = logrus.WarnLevel + + // ErrorLevel level. Logs errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel Level = logrus.ErrorLevel + + // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits + // even if the logging level is set to Panic. + FatalLevel Level = logrus.FatalLevel + + // PanicLevel level. This is the highest level of severity. Logs and + // then calls panic with the message passed to Debug, Info, ... + PanicLevel Level = logrus.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// level can be one of: +// +// - "trace" ([TraceLevel]) +// - "debug" ([DebugLevel]) +// - "info" ([InfoLevel]) +// - "warn" ([WarnLevel]) +// - "error" ([ErrorLevel]) +// - "fatal" ([FatalLevel]) +// - "panic" ([PanicLevel]) +func SetLevel(level string) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return err + } + + L.Logger.SetLevel(lvl) + return nil +} + +// GetLevel returns the current log level. +func GetLevel() Level { + return L.Logger.GetLevel() +} + +// OutputFormat specifies a log output format. +type OutputFormat string + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + TextFormat OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + JSONFormat OutputFormat = "json" +) + +// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). 
+func SetFormat(format OutputFormat) error { + switch format { + case TextFormat: + L.Logger.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: RFC3339NanoFixed, + FullTimestamp: true, + }) + return nil + case JSONFormat: + L.Logger.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: RFC3339NanoFixed, + }) + return nil + default: + return fmt.Errorf("unknown log format: %s", format) + } +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *Entry { + if logger := ctx.Value(loggerKey{}); logger != nil { + return logger.(*Entry) + } + return L.WithContext(ctx) +} diff --git a/vendor/github.com/containerd/ttrpc/.golangci.yml b/vendor/github.com/containerd/ttrpc/.golangci.yml index c8be4980..6462e52f 100644 --- a/vendor/github.com/containerd/ttrpc/.golangci.yml +++ b/vendor/github.com/containerd/ttrpc/.golangci.yml @@ -1,7 +1,5 @@ linters: enable: - - structcheck - - varcheck - staticcheck - unconvert - gofmt diff --git a/vendor/github.com/containerd/ttrpc/Makefile b/vendor/github.com/containerd/ttrpc/Makefile index 47419423..c3a497dc 100644 --- a/vendor/github.com/containerd/ttrpc/Makefile +++ b/vendor/github.com/containerd/ttrpc/Makefile @@ -151,7 +151,7 @@ install-protobuild: coverage: ## generate coverprofiles from the unit tests, except tests that require root @echo "$(WHALE) $@" @rm -f coverage.txt - @$(GO) test -i ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null + @$(GO) test ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null @( for pkg in ${PACKAGES}; do \ $(GO) test ${TESTFLAGS} \ -cover \ diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index 0abc7025..4b1e1e70 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -214,60 +214,66 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if cs.remoteClosed { return io.EOF } + + var msg *streamMessage select { case <-cs.ctx.Done(): return cs.ctx.Err() - case msg, ok := <-cs.s.recv: - if !ok { + case <-cs.s.recvClose: + // If recv has a pending message, process that first + select { + case msg = <-cs.s.recv: + default: return cs.s.recvErr } + case msg = <-cs.s.recv: + } - if msg.header.Type == messageTypeResponse { - resp := &Response{} - err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) - // return the payload buffer for reuse - cs.c.channel.putmbuf(msg.payload) - if err != nil { - return err - } + if msg.header.Type == messageTypeResponse { + resp := &Response{} + err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) + // return the payload buffer for reuse + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } - if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { - return err - } + if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { + return err + } - if resp.Status != nil && resp.Status.Code != int32(codes.OK) { - return status.ErrorProto(resp.Status) - } + if resp.Status != nil && resp.Status.Code != int32(codes.OK) { + return status.ErrorProto(resp.Status) + } + + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + return nil + } else if msg.header.Type == messageTypeData { + if 
!cs.desc.StreamingServer { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) + } + if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { cs.c.deleteStream(cs.s) cs.remoteClosed = true - return nil - } else if msg.header.Type == messageTypeData { - if !cs.desc.StreamingServer { - cs.c.deleteStream(cs.s) - cs.remoteClosed = true - return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) - } - if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { - cs.c.deleteStream(cs.s) - cs.remoteClosed = true - - if msg.header.Flags&flagNoData == flagNoData { - return io.EOF - } - } - - err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) - cs.c.channel.putmbuf(msg.payload) - if err != nil { - return err + if msg.header.Flags&flagNoData == flagNoData { + return io.EOF } - return nil } - return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + return nil } + + return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) } // Close closes the ttrpc connection and underlying connection @@ -477,25 +483,30 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err } defer c.deleteStream(s) + var msg *streamMessage select { case <-ctx.Done(): return ctx.Err() case <-c.ctx.Done(): return ErrClosed - case msg, ok := <-s.recv: - if !ok { + case <-s.recvClose: + // If recv has a pending message, process that first + select { + case msg = <-s.recv: + default: return s.recvErr } + case msg = <-s.recv: + } - if msg.header.Type == messageTypeResponse { - err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) - } else { - err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) - } + if msg.header.Type == messageTypeResponse { + err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) + } else { + err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + } - // return the payload buffer for reuse - c.channel.putmbuf(msg.payload) + // return the payload buffer for reuse + c.channel.putmbuf(msg.payload) - return err - } + return err } diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index 2efda2bc..7af59f82 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -547,7 +547,7 @@ func (c *serverConn) run(sctx context.Context) { // branch. Basically, it means that we are no longer receiving // requests due to a terminal error. 
recvErr = nil // connection is now "closing" - if err == io.EOF || err == io.ErrUnexpectedEOF || errors.Is(err, syscall.ECONNRESET) { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET) { // The client went away and we should stop processing // requests, so that the client connection is closed return diff --git a/vendor/github.com/containerd/ttrpc/stream.go b/vendor/github.com/containerd/ttrpc/stream.go index 5f264fe6..739a4c96 100644 --- a/vendor/github.com/containerd/ttrpc/stream.go +++ b/vendor/github.com/containerd/ttrpc/stream.go @@ -35,27 +35,26 @@ type stream struct { closeOnce sync.Once recvErr error + recvClose chan struct{} } func newStream(id streamID, send sender) *stream { return &stream{ - id: id, - sender: send, - recv: make(chan *streamMessage, 1), + id: id, + sender: send, + recv: make(chan *streamMessage, 1), + recvClose: make(chan struct{}), } } func (s *stream) closeWithError(err error) error { s.closeOnce.Do(func() { - if s.recv != nil { - close(s.recv) - if err != nil { - s.recvErr = err - } else { - s.recvErr = ErrClosed - } - + if err != nil { + s.recvErr = err + } else { + s.recvErr = ErrClosed } + close(s.recvClose) }) return nil } @@ -65,10 +64,14 @@ func (s *stream) send(mt messageType, flags uint8, b []byte) error { } func (s *stream) receive(ctx context.Context, msg *streamMessage) error { - if s.recvErr != nil { + select { + case <-s.recvClose: return s.recvErr + default: } select { + case <-s.recvClose: + return s.recvErr case s.recv <- msg: return nil case <-ctx.Done(): diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go index 41398f40..8d6665bb 100644 --- a/vendor/github.com/containerd/typeurl/v2/types.go +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -194,10 +194,6 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { } func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - if value == nil { - return nil, nil - } - t, err := getTypeByUrl(typeURL) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 489e917e..621725a3 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -16,11 +16,11 @@ func compare(v1, v2 string) int { otherTab = strings.Split(v2, ".") ) - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) + maxVer := len(currTab) + if len(otherTab) > maxVer { + maxVer = len(otherTab) } - for i := 0; i < max; i++ { + for i := 0; i < maxVer; i++ { var currInt, otherInt int if len(currTab) > i { diff --git a/vendor/github.com/go-git/go-git/v5/.gitignore b/vendor/github.com/go-git/go-git/v5/.gitignore index 361133d0..b7f2c580 100644 --- a/vendor/github.com/go-git/go-git/v5/.gitignore +++ b/vendor/github.com/go-git/go-git/v5/.gitignore @@ -4,3 +4,4 @@ coverage.txt profile.out .tmp/ .git-dist/ +.vscode diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md index afd4f03b..bbffea52 100644 --- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -5,229 +5,229 @@ compatibility status with go-git. 
## Getting and creating repositories -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `init` | | ✅ | | | -| `init` | `--bare` | ✅ | | | -| `init` | `--template`
`--separate-git-dir`
`--shared` | ❌ | | | -| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) | -| `clone` | Authentication:
- none
- access token
- username + password
- ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go)
- [clone access token](_examples/clone/auth/basic/access_token/main.go)
- [clone user + password](_examples/clone/auth/basic/username_password/main.go) | -| `clone` | `--progress`
`--single-branch`
`--depth`
`--origin`
`--recurse-submodules` | ✅ | | - [recurse submodules](_examples/clone/main.go)
- [progress](_examples/progress/main.go) | +| Feature | Sub-feature | Status | Notes | Examples | +| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `init` | | ✅ | | | +| `init` | `--bare` | ✅ | | | +| `init` | `--template`
`--separate-git-dir`
`--shared` | ❌ | | | +| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) | +| `clone` | Authentication:
- none
- access token
- username + password
- ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go)
- [clone access token](_examples/clone/auth/basic/access_token/main.go)
- [clone user + password](_examples/clone/auth/basic/username_password/main.go) | +| `clone` | `--progress`
`--single-branch`
`--depth`
`--origin`
`--recurse-submodules`
`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go)
- [progress](_examples/progress/main.go) | ## Basic snapshotting -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | | -| `status` | | ✅ | | | -| `commit` | | ✅ | | - [commit](_examples/commit/main.go) | -| `reset` | | ✅ | | | -| `rm` | | ✅ | | | -| `mv` | | ✅ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ | +| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | | +| `status` | | ✅ | | | +| `commit` | | ✅ | | - [commit](_examples/commit/main.go) | +| `reset` | | ✅ | | | +| `rm` | | ✅ | | | +| `mv` | | ✅ | | | ## Branching and merging -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | -| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | -| `merge` | | ❌ | | | -| `mergetool` | | ❌ | | | -| `stash` | | ❌ | | | -| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | +| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | +| `merge` | | ❌ | | | +| `mergetool` | | ❌ | | | +| `stash` | | ❌ | | | +| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | ## Sharing and updating projects -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `fetch` | | ✅ | | | -| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) | -| `push` | | ✅ | | - [push](_examples/push/main.go) | -| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) | -| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) | -| `submodule` | deinit | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ | +| `fetch` | | ✅ | | | +| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) | +| `push` | | ✅ | | - [push](_examples/push/main.go) | +| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) | +| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) | +| `submodule` | deinit | ❌ | | | ## Inspection and comparison -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `show` | | ✅ | | | -| `log` | | ✅ | | - [log](_examples/log/main.go) | -| `shortlog` | | (see log) | | | -| `describe` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ---------- | ----------- | --------- | ----- | ------------------------------ | +| `show` | | ✅ | | | +| `log` | | ✅ | | - [log](_examples/log/main.go) | +| `shortlog` | | (see log) | | | +| `describe` | | ❌ | | | ## Patching -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `apply` | | ❌ | | | -| `cherry-pick` | | ❌ | | | -| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | | -| `rebase` | | ❌ | | | -| `revert` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- | +| `apply` | | ❌ | | | +| `cherry-pick` | | ❌ | | | +| `diff` | | ✅ | Patch object with UnifiedDiff output representation. 
| | +| `rebase` | | ❌ | | | +| `revert` | | ❌ | | | ## Debugging -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `bisect` | | ❌ | | | -| `blame` | | ✅ | | - [blame](_examples/blame/main.go) | -| `grep` | | ✅ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | ----- | ---------------------------------- | +| `bisect` | | ❌ | | | +| `blame` | | ✅ | | - [blame](_examples/blame/main.go) | +| `grep` | | ✅ | | | ## Email -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `am` | | ❌ | | | -| `apply` | | ❌ | | | -| `format-patch` | | ❌ | | | -| `send-email` | | ❌ | | | -| `request-pull` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| -------------- | ----------- | ------ | ----- | -------- | +| `am` | | ❌ | | | +| `apply` | | ❌ | | | +| `format-patch` | | ❌ | | | +| `send-email` | | ❌ | | | +| `request-pull` | | ❌ | | | ## External systems -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `svn` | | ❌ | | | -| `fast-import` | | ❌ | | | -| `lfs` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ------------- | ----------- | ------ | ----- | -------- | +| `svn` | | ❌ | | | +| `fast-import` | | ❌ | | | +| `lfs` | | ❌ | | | ## Administration -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `clean` | | ✅ | | | -| `gc` | | ❌ | | | -| `fsck` | | ❌ | | | -| `reflog` | | ❌ | | | -| `filter-branch` | | ❌ | | | -| `instaweb` | | ❌ | | | -| `archive` | | ❌ | | | -| `bundle` | | ❌ | | | -| `prune` | | ❌ | | | -| `repack` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | ----------- | ------ | ----- | -------- | +| `clean` | | ✅ | | | +| `gc` | | ❌ | | | +| `fsck` | | ❌ | | | +| `reflog` | | ❌ | | | +| `filter-branch` | | ❌ | | | +| `instaweb` | | ❌ | | | +| `archive` | | ❌ | | | +| `bundle` | | ❌ | | | +| `prune` | | ❌ | | | +| `repack` | | ❌ | | | ## Server admin -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `daemon` | | ❌ | | | -| `update-server-info` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| -------------------- | ----------- | ------ | ----- | -------- | +| `daemon` | | ❌ | | | +| `update-server-info` | | ❌ | | | ## Advanced -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `notes` | | ❌ | | | -| `replace` | | ❌ | | | -| `worktree` | | ❌ | | | -| `annotate` | | (see blame) | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ---------- | ----------- | ----------- | ----- | -------- | +| `notes` | | ❌ | | | +| `replace` | | ❌ | | | +| `worktree` | | ❌ | | | +| `annotate` | | (see blame) | | | ## GPG -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `git-verify-commit` | | ✅ | | | -| `git-verify-tag` | | ✅ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| ------------------- | ----------- | ------ | ----- | -------- | +| `git-verify-commit` | | ✅ | | | +| `git-verify-tag` | | ✅ | | | ## Plumbing commands -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `cat-file` | | ✅ | | | -| `check-ignore` | | ❌ | | | -| `commit-tree` | | ❌ | | | -| `count-objects` | | ❌ | | | -| `diff-index` | | ❌ | | | -| `for-each-ref` | | ✅ | | | -| `hash-object` | | ✅ | | | -| `ls-files` | | ✅ | | | -| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) | -| `merge-base` | `--independent`
`--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) | -| `merge-base` | `--fork-point`
`--octopus` | ❌ | | | -| `read-tree` | | ❌ | | | -| `rev-list` | | ✅ | | | -| `rev-parse` | | ❌ | | | -| `show-ref` | | ✅ | | | -| `symbolic-ref` | | ✅ | | | -| `update-index` | | ❌ | | | -| `update-ref` | | ❌ | | | -| `verify-pack` | | ❌ | | | -| `write-tree` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- | +| `cat-file` | | ✅ | | | +| `check-ignore` | | ❌ | | | +| `commit-tree` | | ❌ | | | +| `count-objects` | | ❌ | | | +| `diff-index` | | ❌ | | | +| `for-each-ref` | | ✅ | | | +| `hash-object` | | ✅ | | | +| `ls-files` | | ✅ | | | +| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) | +| `merge-base` | `--independent`
`--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) | +| `merge-base` | `--fork-point`
`--octopus` | ❌ | | | +| `read-tree` | | ❌ | | | +| `rev-list` | | ✅ | | | +| `rev-parse` | | ❌ | | | +| `show-ref` | | ✅ | | | +| `symbolic-ref` | | ✅ | | | +| `update-index` | | ❌ | | | +| `update-ref` | | ❌ | | | +| `verify-pack` | | ❌ | | | +| `write-tree` | | ❌ | | | ## Indexes and Git Protocols -| Feature | Version | Status | Notes | -|---|---|---|---| -| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | -| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | | -| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | -| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | | -| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | | -| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | -| pack-*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | -| pack-*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | -| cruft packs | | ❌ | | +| Feature | Version | Status | Notes | +| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- | +| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | | +| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | | +| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | | +| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| cruft packs | | ❌ | | ## Capabilities -| Feature | Status | Notes | -|---|---|---| -| `multi_ack` | ❌ | | -| `multi_ack_detailed` | ❌ | | -| `no-done` | ❌ | | -| `thin-pack` | ❌ | | -| `side-band` | ⚠️ (partial) | | -| `side-band-64k` | ⚠️ (partial) | | -| `ofs-delta` | ✅ | | -| `agent` | ✅ | | -| `object-format` | ❌ | | -| `symref` | ✅ | | -| `shallow` | ✅ | | -| `deepen-since` | ✅ | | -| `deepen-not` | ❌ | | -| `deepen-relative` | ❌ | | -| `no-progress` | ✅ | | -| `include-tag` | ✅ | | -| `report-status` | ✅ | | -| `report-status-v2` | ❌ | | -| `delete-refs` | ✅ | | -| `quiet` | ❌ | | -| `atomic` | ✅ | | -| `push-options` | ✅ | | -| `allow-tip-sha1-in-want` | ✅ | | -| `allow-reachable-sha1-in-want` | ❌ | | -| `push-cert=` | ❌ | | -| `filter` | ❌ | | -| `session-id=` | ❌ | | +| Feature | Status | Notes | +| ------------------------------ | ------------ | ----- | +| `multi_ack` | ❌ | | +| `multi_ack_detailed` | ❌ | | +| `no-done` | ❌ | | +| `thin-pack` | ❌ | | +| `side-band` | ⚠️ (partial) | | +| `side-band-64k` | ⚠️ (partial) | | +| `ofs-delta` | ✅ | | +| `agent` | ✅ | | +| `object-format` | ❌ | | +| `symref` | ✅ | | +| `shallow` | ✅ | | +| `deepen-since` | ✅ | | +| `deepen-not` | ❌ | | +| `deepen-relative` | ❌ | | +| `no-progress` | ✅ | | +| `include-tag` | ✅ | | +| `report-status` | ✅ | | +| 
`report-status-v2` | ❌ | | +| `delete-refs` | ✅ | | +| `quiet` | ❌ | | +| `atomic` | ✅ | | +| `push-options` | ✅ | | +| `allow-tip-sha1-in-want` | ✅ | | +| `allow-reachable-sha1-in-want` | ❌ | | +| `push-cert=` | ❌ | | +| `filter` | ❌ | | +| `session-id=` | ❌ | | ## Transport Schemes -| Scheme | Status | Notes | Examples | -|---|---|---|---| -| `http(s)://` (dumb) | ❌ | | | -| `http(s)://` (smart) | ✅ | | | -| `git://` | ✅ | | | -| `ssh://` | ✅ | | | -| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | | -| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) | +| Scheme | Status | Notes | Examples | +| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- | +| `http(s)://` (dumb) | ❌ | | | +| `http(s)://` (smart) | ✅ | | | +| `git://` | ✅ | | | +| `ssh://` | ✅ | | | +| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | | +| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) | ## SHA256 -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) | -| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) | -| `pull` | | ❌ | | | -| `fetch` | | ❌ | | | -| `push` | | ❌ | | | +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ | +| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) | +| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) | +| `pull` | | ❌ | | | +| `fetch` | | ❌ | | | +| `push` | | ❌ | | | ## Other features -| Feature | Sub-feature | Status | Notes | Examples | -|---|---|---|---|---| -| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | | -| `config` | `--global`
`--system` | ✅ | Read-only. | | -| `gitignore` | | ✅ | | | -| `gitattributes` | | ✅ | | | -| `git-worktree` | | ❌ | Multiple worktrees are not supported. | | +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- | +| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | | +| `config` | `--global`
`--system` | ✅ | Read-only. | | +| `gitignore` | | ✅ | | | +| `gitattributes` | | ✅ | | | +| `git-worktree` | | ❌ | Multiple worktrees are not supported. | | diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile index 66adc8ce..1e103967 100644 --- a/vendor/github.com/go-git/go-git/v5/Makefile +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -42,3 +42,12 @@ test-coverage: clean: rm -rf $(GIT_DIST_PATH) + +fuzz: + @go test -fuzz=FuzzParser $(PWD)/internal/revision + @go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config + @go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile + @go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object + @go test -fuzz=FuzzDecode $(PWD)/plumbing/object + @go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp + @go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go index 82af12d2..da425a78 100644 --- a/vendor/github.com/go-git/go-git/v5/config/config.go +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -63,9 +63,9 @@ type Config struct { } User struct { - // Name is the personal name of the author and the commiter of a commit. + // Name is the personal name of the author and the committer of a commit. Name string - // Email is the email of the author and the commiter of a commit. + // Email is the email of the author and the committer of a commit. Email string } @@ -77,9 +77,9 @@ type Config struct { } Committer struct { - // Name is the personal name of the commiter of a commit. + // Name is the personal name of the committer of a commit. Name string - // Email is the email of the the commiter of a commit. + // Email is the email of the committer of a commit. Email string } @@ -157,8 +157,8 @@ func ReadConfig(r io.Reader) (*Config, error) { } // LoadConfig loads a config file from a given scope. The returned Config, -// contains exclusively information fom the given scope. If couldn't find a -// config file to the given scope, a empty one is returned. +// contains exclusively information from the given scope. If it couldn't find a +// config file to the given scope, an empty one is returned. func LoadConfig(scope Scope) (*Config, error) { if scope == LocalScope { return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer") diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go index 757bdc84..8902b7e3 100644 --- a/vendor/github.com/go-git/go-git/v5/options.go +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -78,6 +78,15 @@ type CloneOptions struct { CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions + // When the repository to clone is on the local machine, instead of + // using hard links, automatically setup .git/objects/info/alternates + // to share the objects with the source repository. + // The resulting repository starts out without any object of its own. + // NOTE: this is a possibly dangerous operation; do not use it unless + // you understand what it does. + // + // [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared + Shared bool } // Validate validates the fields and sets the default values. 
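
For reference, the `Shared` field added to `CloneOptions` above is consumed through the existing `PlainClone` API. A minimal sketch, not part of the patch, with placeholder paths and assuming only the APIs visible in this diff (local-path URLs and the new `Shared` option):

```go
package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Clone a repository that already lives on the local filesystem.
	// With Shared set, the new clone is given .git/objects/info/alternates
	// pointing at the source repository's object store instead of copying
	// the objects. Both paths are placeholders.
	repo, err := git.PlainClone("/tmp/derived", false, &git.CloneOptions{
		URL:    "/path/to/source/repo",
		Shared: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	head, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cloned, HEAD at", head.Hash())
}
```

As enforced in the `repository.go` hunk further down, a non-local URL combined with `Shared` fails with the new `ErrAlternatePathNotSupported`.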
@@ -737,6 +746,9 @@ type PlainOpenOptions struct { func (o *PlainOpenOptions) Validate() error { return nil } type PlainInitOptions struct { + InitOptions + // Determines if the repository will have a worktree (non-bare) or not (bare). + Bare bool ObjectFormat formatcfg.ObjectFormat } diff --git a/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh b/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh new file mode 100644 index 00000000..885548f4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh @@ -0,0 +1,35 @@ +#!/bin/bash -eu +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ + + +go mod download +go get github.com/AdamKorcz/go-118-fuzz-build/testing + +if [ "$SANITIZER" != "coverage" ]; then + sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go + sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go + sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go + sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go +fi + +compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser +compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config +compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta +compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes +compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode +compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp +compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go index b848a979..ea1a4575 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go @@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool { m != Submodule } -// String returns the FileMode as a string in the standatd git format, +// String returns the FileMode as a string in the standard git format, // this is, an octal number padded with ceros to 7 digits. Malformed // modes are printed in that same format, for easier debugging. // diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go index 82d18561..8609848f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go @@ -24,7 +24,7 @@ func reset() { algos[crypto.SHA256] = crypto.SHA256.New } -// RegisterHash allows for the hash algorithm used to be overriden. +// RegisterHash allows for the hash algorithm used to be overridden. 
// This ensures the hash selection for go-git must be explicit, when // overriding the default value. func RegisterHash(h crypto.Hash, f func() hash.Hash) error { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object.go index 2655dee4..3ee9de9f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object.go @@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool { return t >= CommitObject && t <= REFDeltaObject } -// IsDelta returns true for any ObjectTyoe that represents a delta (i.e. +// IsDelta returns true for any ObjectType that represents a delta (i.e. // REFDeltaObject or OFSDeltaObject). func (t ObjectType) IsDelta() bool { return t == REFDeltaObject || t == OFSDeltaObject diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index 8a0f35c7..ceed5d01 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -17,14 +17,25 @@ import ( ) const ( - beginpgp string = "-----BEGIN PGP SIGNATURE-----" - endpgp string = "-----END PGP SIGNATURE-----" - headerpgp string = "gpgsig" + beginpgp string = "-----BEGIN PGP SIGNATURE-----" + endpgp string = "-----END PGP SIGNATURE-----" + headerpgp string = "gpgsig" + headerencoding string = "encoding" + + // https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153 + // When a merge commit is created from a signed tag, the tag is embedded in + // the commit with the "mergetag" header. + headermergetag string = "mergetag" + + defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8" ) // Hash represents the hash of an object type Hash plumbing.Hash +// MessageEncoding represents the encoding of a commit +type MessageEncoding string + // Commit points to a single tree, marking it as what the project looked like // at a certain point in time. It contains meta-information about that point // in time, such as a timestamp, the author of the changes since the last @@ -38,6 +49,9 @@ type Commit struct { // Committer is the one performing the commit, might be different from // Author. Committer Signature + // MergeTag is the embedded tag object when a merge commit is created by + // merging a signed tag. + MergeTag string // PGPSignature is the PGP signature of the commit. PGPSignature string // Message is the commit message, contains arbitrary text. @@ -46,6 +60,8 @@ type Commit struct { TreeHash plumbing.Hash // ParentHashes are the hashes of the parent commits of the commit. ParentHashes []plumbing.Hash + // Encoding is the encoding of the commit. 
+ Encoding MessageEncoding s storer.EncodedObjectStorer } @@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } c.Hash = o.Hash() + c.Encoding = defaultUtf8CommitMesageEncoding reader, err := o.Reader() if err != nil { @@ -184,6 +201,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { defer sync.PutBufioReader(r) var message bool + var mergetag bool var pgpsig bool var msgbuf bytes.Buffer for { @@ -192,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { return err } + if mergetag { + if len(line) > 0 && line[0] == ' ' { + line = bytes.TrimLeft(line, " ") + c.MergeTag += string(line) + continue + } else { + mergetag = false + } + } + if pgpsig { if len(line) > 0 && line[0] == ' ' { line = bytes.TrimLeft(line, " ") @@ -225,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { c.Author.Decode(data) case "committer": c.Committer.Decode(data) + case headermergetag: + c.MergeTag += string(data) + "\n" + mergetag = true + case headerencoding: + c.Encoding = MessageEncoding(data) case headerpgp: c.PGPSignature += string(data) + "\n" pgpsig = true @@ -286,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { return err } + if c.MergeTag != "" { + if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil { + return err + } + + // Split tag information lines and re-write with a left padding and + // newline. Use join for this so it's clear that a newline should not be + // added after this section. The newline will be added either as part of + // the PGP signature or the commit message. + mergetag := strings.TrimSuffix(c.MergeTag, "\n") + lines := strings.Split(mergetag, "\n") + if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { + return err + } + } + + if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding { + if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil { + return err + } + } + if c.PGPSignature != "" && includeSig { if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { return err diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go index 8cd0a724..a9ddb538 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go @@ -101,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error { return fmt.Errorf("unexpected flush") } - if bytes.Equal(line[0:3], ack) { - return r.decodeACKLine(line) - } + if len(line) >= 3 { + if bytes.Equal(line[0:3], ack) { + return r.decodeACKLine(line) + } - if bytes.Equal(line[0:3], nak) { - return nil + if bytes.Equal(line[0:3], nak) { + return nil + } } return fmt.Errorf("unexpected content %q", string(line)) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go index 895a3bf6..3da29985 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go @@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error { return d.err } -// fills out the parser stiky error +// fills out the parser sticky error func (d *ulReqDecoder) error(format string, a ...interface{}) { msg := fmt.Sprintf( "pkt-line %d: %s", d.nLine, diff --git 
a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go index d8a9c27a..126b3742 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go @@ -42,6 +42,7 @@ type EncodedObjectStorer interface { HasEncodedObject(plumbing.Hash) error // EncodedObjectSize returns the plaintext size of the encoded object. EncodedObjectSize(plumbing.Hash) (int64, error) + AddAlternate(remote string) error } // DeltaObjectStorer is an EncodedObjectStorer that can return delta diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go index c6a054a6..b05437fb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go @@ -108,7 +108,7 @@ type Endpoint struct { // Host is the host. Host string // Port is the port to connect, if 0 the default port for the given protocol - // wil be used. + // will be used. Port int // Path is the repository path. Path string diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go index 5fdf4250..6574116b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "io" + "regexp" "strings" "time" @@ -28,6 +29,10 @@ const ( var ( ErrTimeoutExceeded = errors.New("timeout exceeded") + // stdErrSkipPattern is used for skipping lines from a command's stderr output. + // Any line matching this pattern will be skipped from further + // processing and not be returned to calling code. + stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$") ) // Commander creates Command instances. 
This is the main entry point for @@ -149,10 +154,17 @@ func (c *client) listenFirstError(r io.Reader) chan string { errLine := make(chan string, 1) go func() { s := bufio.NewScanner(r) - if s.Scan() { - errLine <- s.Text() - } else { - close(errLine) + for { + if s.Scan() { + line := s.Text() + if !stdErrSkipPattern.MatchString(line) { + errLine <- line + break + } + } else { + close(errLine) + break + } } _, _ = io.Copy(io.Discard, r) @@ -393,6 +405,7 @@ var ( gitProtocolNoSuchErr = "ERR no such repository" gitProtocolAccessDeniedErr = "ERR access denied" gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" + gitlabRepoNotFoundErr = "remote: ERROR: The project you were looking for could not be found" ) func isRepoNotFoundError(s string) bool { @@ -424,6 +437,10 @@ func isRepoNotFoundError(s string) bool { return true } + if strings.HasPrefix(s, gitlabRepoNotFoundErr) { + return true + } + return false } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go new file mode 100644 index 00000000..bc18b27e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go @@ -0,0 +1,46 @@ +package common + +import ( + "bytes" + "io" + + gogitioutil "github.com/go-git/go-git/v5/utils/ioutil" + + "github.com/go-git/go-git/v5/plumbing/transport" +) + +type MockCommand struct { + stdin bytes.Buffer + stdout bytes.Buffer + stderr bytes.Buffer +} + +func (c MockCommand) StderrPipe() (io.Reader, error) { + return &c.stderr, nil +} + +func (c MockCommand) StdinPipe() (io.WriteCloser, error) { + return gogitioutil.WriteNopCloser(&c.stdin), nil +} + +func (c MockCommand) StdoutPipe() (io.Reader, error) { + return &c.stdout, nil +} + +func (c MockCommand) Start() error { + return nil +} + +func (c MockCommand) Close() error { + panic("not implemented") +} + +type MockCommander struct { + stderr string +} + +func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) { + return &MockCommand{ + stderr: *bytes.NewBufferString(c.stderr), + }, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index 679e0af2..2ffffe7b 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -614,7 +614,7 @@ func (r *Remote) addOrUpdateReferences( req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease, ) error { - // If it is not a wilcard refspec we can directly search for the reference + // If it is not a wildcard refspec we can directly search for the reference // in the references dictionary. 
if !rs.IsWildcard() { ref, ok := refsDict[rs.Src()] @@ -693,7 +693,7 @@ func (r *Remote) addCommit(rs config.RefSpec, remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { - //TODO: check actual git behavior here + // TODO: check actual git behavior here return nil } @@ -735,7 +735,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { - //TODO: check actual git behavior here + // TODO: check actual git behavior here return nil } diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 3154ac01..48988383 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -22,6 +22,7 @@ import ( "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/internal/path_util" "github.com/go-git/go-git/v5/internal/revision" + "github.com/go-git/go-git/v5/internal/url" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/cache" formatcfg "github.com/go-git/go-git/v5/plumbing/format/config" @@ -62,6 +63,7 @@ var ( ErrUnableToResolveCommit = errors.New("unable to resolve commit") ErrPackedObjectsNotSupported = errors.New("packed objects not supported") ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") + ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") ) // Repository represents a git repository @@ -235,9 +237,19 @@ func CloneContext( // if the repository will have worktree (non-bare) or not (bare), if the path // is not empty ErrRepositoryAlreadyExists is returned. 
func PlainInit(path string, isBare bool) (*Repository, error) { + return PlainInitWithOptions(path, &PlainInitOptions{ + Bare: isBare, + }) +} + +func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) { + if opts == nil { + opts = &PlainInitOptions{} + } + var wt, dot billy.Filesystem - if isBare { + if opts.Bare { dot = osfs.New(path) } else { wt = osfs.New(path) @@ -246,16 +258,7 @@ func PlainInit(path string, isBare bool) (*Repository, error) { s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - return Init(s, wt) -} - -func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) { - wt := osfs.New(path) - dot, _ := wt.Chroot(GitDirName) - - s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - - r, err := Init(s, wt) + r, err := InitWithOptions(s, wt, opts.InitOptions) if err != nil { return nil, err } @@ -265,7 +268,7 @@ func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, err return nil, err } - if opts != nil { + if opts.ObjectFormat != "" { if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 { return nil, ErrSHA256NotSupported } @@ -886,6 +889,30 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { return err } + // When the repository to clone is on the local machine, + // instead of using hard links, automatically setup .git/objects/info/alternates + // to share the objects with the source repository + if o.Shared { + if !url.IsLocalEndpoint(o.URL) { + return ErrAlternatePathNotSupported + } + altpath := o.URL + remoteRepo, err := PlainOpen(o.URL) + if err != nil { + return fmt.Errorf("failed to open remote repository: %w", err) + } + conf, err := remoteRepo.Config() + if err != nil { + return fmt.Errorf("failed to read remote repository configuration: %w", err) + } + if !conf.Core.IsBare { + altpath = path.Join(altpath, GitDirName) + } + if err := r.Storer.AddAlternate(altpath); err != nil { + return fmt.Errorf("failed to add alternate file to git objects dir: %w", err) + } + } + ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ RefSpecs: c.Fetch, Depth: o.Depth, diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go index e02e6ddf..3080e4ac 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go @@ -8,7 +8,9 @@ import ( "fmt" "io" "os" + "path" "path/filepath" + "runtime" "sort" "strings" "time" @@ -38,6 +40,7 @@ const ( remotesPath = "remotes" logsPath = "logs" worktreesPath = "worktrees" + alternatesPath = "alternates" tmpPackedRefsPrefix = "._packed-refs" @@ -1105,10 +1108,38 @@ func (d *DotGit) Module(name string) (billy.Filesystem, error) { return d.fs.Chroot(d.fs.Join(modulePath, name)) } +func (d *DotGit) AddAlternate(remote string) error { + altpath := d.fs.Join(objectsPath, infoPath, alternatesPath) + + f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640) + if err != nil { + return fmt.Errorf("cannot open file: %w", err) + } + defer f.Close() + + // locking in windows throws an error, based on comments + // https://github.com/go-git/go-git/pull/860#issuecomment-1751823044 + // do not lock on windows platform. 
+ if runtime.GOOS != "windows" { + if err = f.Lock(); err != nil { + return fmt.Errorf("cannot lock file: %w", err) + } + defer f.Unlock() + } + + line := path.Join(remote, objectsPath) + "\n" + _, err = io.WriteString(f, line) + if err != nil { + return fmt.Errorf("error writing 'alternates' file: %w", err) + } + + return nil +} + // Alternates returns DotGit(s) based off paths in objects/info/alternates if // available. This can be used to checks if it's a shared repository. func (d *DotGit) Alternates() ([]*DotGit, error) { - altpath := d.fs.Join("objects", "info", "alternates") + altpath := d.fs.Join(objectsPath, infoPath, alternatesPath) f, err := d.fs.Open(altpath) if err != nil { return nil, err diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go index 7e7a2c50..2069d3a6 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go @@ -74,3 +74,7 @@ func (s *Storage) Filesystem() billy.Filesystem { func (s *Storage) Init() error { return s.dir.Initialize() } + +func (s *Storage) AddAlternate(remote string) error { + return s.dir.AddAlternate(remote) +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go index ef6a4455..79211c7c 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go +++ b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go @@ -202,6 +202,10 @@ func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { return errNotSupported } +func (o *ObjectStorage) AddAlternate(remote string) error { + return errNotSupported +} + type TxObjectStorage struct { Storage *ObjectStorage Objects map[plumbing.Hash]plumbing.EncodedObject diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go index a14d48db..b8f9df1a 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go +++ b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go @@ -1,4 +1,4 @@ -// Package binary implements sintax-sugar functions on top of the standard +// Package binary implements syntax-sugar functions on top of the standard // library binary package package binary diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go index 9f5145a2..8090942d 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go @@ -55,7 +55,7 @@ package merkletrie // Here is a full list of all the cases that are similar and how to // merge them together into more general cases. Each general case // is labeled with an uppercase letter for further reference, and it -// is followed by the pseudocode of the checks you have to perfrom +// is followed by the pseudocode of the checks you have to perform // on both noders to see if you are in such a case, the actions to // perform (i.e. what changes to output) and how to advance the // iterators of each tree to continue the comparison process. 
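
The storage-level counterpart of the `Shared` clone path is the new `AddAlternate` method on the encoded object storer, which `clone()` calls with the source repository's path. A sketch of invoking it directly on an opened repository (placeholder paths; this mirrors, rather than reproduces, what the `Shared` code path does):

```go
package main

import (
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/derived")
	if err != nil {
		log.Fatal(err)
	}

	// For a repository opened from disk, Storer is a *filesystem.Storage;
	// its new AddAlternate appends "<remote>/objects" to
	// .git/objects/info/alternates, while the in-memory storage returns
	// errNotSupported. The path is a placeholder.
	if err := repo.Storer.AddAlternate("/path/to/source/repo/.git"); err != nil {
		log.Fatal(err)
	}
}
```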
diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index f9c01af2..f8b854dd 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -78,6 +78,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { Force: o.Force, InsecureSkipTLS: o.InsecureSkipTLS, CABundle: o.CABundle, + ProxyOptions: o.ProxyOptions, }) updated := true diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 2bd78667..7ed347d3 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + ## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 55668887..a502fdc5 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -11,7 +11,7 @@ please explain why in the pull request description. ### Releasing -Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits that would precipitate a SemVer change, as described in the Conventional Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) to create a release candidate pull request. Once submitted, `release-please` will create a release. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a56138cc..dc75f7d9 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -294,3 +298,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. 
+func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/huandu/xstrings/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md deleted file mode 100644 index d7b4b8d5..00000000 --- a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing # - -Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. - -## New API or feature ## - -I want to speak more about how to add new functions to this package. - -Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. - -* Rule 1: Only string algorithm, which takes string as input, can be included. -* Rule 2: If a function has been implemented in package `string`, it must not be included. -* Rule 3: If a function is not language neutral, it must not be included. -* Rule 4: If a function is a part of standard library in other languages, it can be included. -* Rule 5: If a function is quite useful in some famous framework or library, it can be included. - -New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. - -## Pull request ## - -Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. - -If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE deleted file mode 100644 index 27017725..00000000 --- a/vendor/github.com/huandu/xstrings/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Huan Du - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md deleted file mode 100644 index 750c3c7e..00000000 --- a/vendor/github.com/huandu/xstrings/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# xstrings - -[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions) -[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings) -[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) -[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) - -Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). - -All functions are well tested and carefully tuned for performance. - -## Propose a new function - -Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. - -## Install - -Use `go get` to install this library. - - go get github.com/huandu/xstrings - -## API document - -See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. - -## Function list - -Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. - -Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
- -### Package `xstrings` functions - -_Keep this table sorted by Function in ascending order._ - -| Function | Friends | # | -| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | -| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | 
[#22](https://github.com/huandu/xstrings/issues/22) | -| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | -| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | - -### Package `strings` functions - -_Keep this table sorted by Function in ascending order._ - -| Function | Friends | -| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | -| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | -| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -| 
[Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | - -## License - -This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go deleted file mode 100644 index f427cc84..00000000 --- a/vendor/github.com/huandu/xstrings/common.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -const bufferMaxInitGrowSize = 2048 - -// Lazy initialize a buffer. -func allocBuffer(orig, cur string) *stringBuilder { - output := &stringBuilder{} - maxSize := len(orig) * 4 - - // Avoid to reserve too much memory at once. - if maxSize > bufferMaxInitGrowSize { - maxSize = bufferMaxInitGrowSize - } - - output.Grow(maxSize) - output.WriteString(orig[:len(orig)-len(cur)]) - return output -} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go deleted file mode 100644 index 151c3151..00000000 --- a/vendor/github.com/huandu/xstrings/convert.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "math/rand" - "unicode" - "unicode/utf8" -) - -// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. -// -// Some samples. 
-// "some_words" => "SomeWords" -// "http_server" => "HttpServer" -// "no_https" => "NoHttps" -// "_complex__case_" => "_Complex_Case_" -// "some words" => "SomeWords" -func ToCamelCase(str string) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - var r0, r1 rune - var size int - - // leading connector will appear in output. - for len(str) > 0 { - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if !isConnector(r0) { - r0 = unicode.ToUpper(r0) - break - } - - buf.WriteRune(r0) - } - - if len(str) == 0 { - // A special case for a string contains only 1 rune. - if size != 0 { - buf.WriteRune(r0) - } - - return buf.String() - } - - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r0) && isConnector(r1) { - buf.WriteRune(r1) - continue - } - - if isConnector(r1) { - r0 = unicode.ToUpper(r0) - } else { - r0 = unicode.ToLower(r0) - buf.WriteRune(r1) - } - } - - buf.WriteRune(r0) - return buf.String() -} - -// ToSnakeCase can convert all upper case characters in a string to -// snake case format. -// -// Some samples. -// "FirstName" => "first_name" -// "HTTPServer" => "http_server" -// "NoHTTPS" => "no_https" -// "GO_PATH" => "go_path" -// "GO PATH" => "go_path" // space is converted to underscore. -// "GO-PATH" => "go_path" // hyphen is converted to underscore. -// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http_20x_ok" -// "Duration2m3s" => "duration_2m3s" -// "Bld4Floor3rd" => "bld4_floor_3rd" -func ToSnakeCase(str string) string { - return camelCaseToLowerCase(str, '_') -} - -// ToKebabCase can convert all upper case characters in a string to -// kebab case format. -// -// Some samples. -// "FirstName" => "first-name" -// "HTTPServer" => "http-server" -// "NoHTTPS" => "no-https" -// "GO_PATH" => "go-path" -// "GO PATH" => "go-path" // space is converted to '-'. -// "GO-PATH" => "go-path" // hyphen is converted to '-'. -// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http-20x-ok" -// "Duration2m3s" => "duration-2m3s" -// "Bld4Floor3rd" => "bld4-floor-3rd" -func ToKebabCase(str string) string { - return camelCaseToLowerCase(str, '-') -} - -func camelCaseToLowerCase(str string, connector rune) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - wt, word, remaining := nextWord(str) - - for len(remaining) > 0 { - if wt != connectorWord { - toLower(buf, wt, word, connector) - } - - prev := wt - last := word - wt, word, remaining = nextWord(remaining) - - switch prev { - case numberWord: - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != punctWord && wt != connectorWord { - buf.WriteRune(connector) - } - - case connectorWord: - toLower(buf, prev, last, connector) - - case punctWord: - // nothing. - - default: - if wt != numberWord { - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - if len(remaining) == 0 { - break - } - - last := word - wt, word, remaining = nextWord(remaining) - - // consider number as a part of previous word. - // e.g. 
"Bld4Floor" => "bld4_floor" - if wt != alphabetWord { - toLower(buf, numberWord, last, connector) - - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - // if there are some lower case letters following a number, - // add connector before the number. - // e.g. "HTTP2xx" => "http_2xx" - buf.WriteRune(connector) - toLower(buf, numberWord, last, connector) - - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - } - } - - toLower(buf, wt, word, connector) - return buf.String() -} - -func isConnector(r rune) bool { - return r == '-' || r == '_' || unicode.IsSpace(r) -} - -type wordType int - -const ( - invalidWord wordType = iota - numberWord - upperCaseWord - alphabetWord - connectorWord - punctWord - otherWord -) - -func nextWord(str string) (wt wordType, word, remaining string) { - if len(str) == 0 { - return - } - - var offset int - remaining = str - r, size := nextValidRune(remaining, utf8.RuneError) - offset += size - - if r == utf8.RuneError { - wt = invalidWord - word = str[:offset] - remaining = str[offset:] - return - } - - switch { - case isConnector(r): - wt = connectorWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isConnector(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsPunct(r): - wt = punctWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsUpper(r): - wt = upperCaseWord - remaining = remaining[size:] - - if len(remaining) == 0 { - break - } - - r, size = nextValidRune(remaining, r) - - switch { - case unicode.IsUpper(r): - prevSize := size - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsUpper(r) { - break - } - - prevSize = size - offset += size - remaining = remaining[size:] - } - - // it's a bit complex when dealing with a case like "HTTPStatus". - // it's expected to be splitted into "HTTP" and "Status". - // Therefore "S" should be in remaining instead of word. 
- if len(remaining) > 0 && isAlphabet(r) { - offset -= prevSize - remaining = str[offset:] - } - - case isAlphabet(r): - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - case isAlphabet(r): - wt = alphabetWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsNumber(r): - wt = numberWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsNumber(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - default: - wt = otherWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - word = str[:offset] - return -} - -func nextValidRune(str string, prev rune) (r rune, size int) { - var sz int - - for len(str) > 0 { - r, sz = utf8.DecodeRuneInString(str) - size += sz - - if r != utf8.RuneError { - return - } - - str = str[sz:] - } - - r = prev - return -} - -func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { - buf.Grow(buf.Len() + len(str)) - - if wt != upperCaseWord && wt != connectorWord { - buf.WriteString(str) - return - } - - for len(str) > 0 { - r, size := utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r) { - buf.WriteRune(connector) - } else if unicode.IsUpper(r) { - buf.WriteRune(unicode.ToLower(r)) - } else { - buf.WriteRune(r) - } - } -} - -// SwapCase will swap characters case from upper to lower or lower to upper. -func SwapCase(str string) string { - var r rune - var size int - - buf := &stringBuilder{} - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case unicode.IsUpper(r): - buf.WriteRune(unicode.ToLower(r)) - - case unicode.IsLower(r): - buf.WriteRune(unicode.ToUpper(r)) - - default: - buf.WriteRune(r) - } - - str = str[size:] - } - - return buf.String() -} - -// FirstRuneToUpper converts first rune to upper case if necessary. -func FirstRuneToUpper(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsLower(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToUpper(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// FirstRuneToLower converts first rune to lower case if necessary. -func FirstRuneToLower(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsUpper(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToLower(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// Shuffle randomizes runes in a string and returns the result. -// It uses default random source in `math/rand`. -func Shuffle(str string) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - - for i := len(runes) - 1; i > 0; i-- { - index = rand.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// ShuffleSource randomizes runes in a string with given random source. 
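// Unlike Shuffle, which draws from the shared default source, passing a fixed
// source (for example rand.NewSource(1)) makes the resulting permutation reproducible.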
-func ShuffleSource(str string, src rand.Source) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - r := rand.New(src) - - for i := len(runes) - 1; i > 0; i-- { - index = r.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// Successor returns the successor to string. -// -// If there is one alphanumeric rune is found in string, increase the rune by 1. -// If increment generates a "carry", the rune to the left of it is incremented. -// This process repeats until there is no carry, adding an additional rune if necessary. -// -// If there is no alphanumeric rune, the rightmost rune will be increased by 1 -// regardless whether the result is a valid rune or not. -// -// Only following characters are alphanumeric. -// * a - z -// * A - Z -// * 0 - 9 -// -// Samples (borrowed from ruby's String#succ document): -// "abcd" => "abce" -// "THX1138" => "THX1139" -// "<>" => "<>" -// "1999zzz" => "2000aaa" -// "ZZZ9999" => "AAAA0000" -// "***" => "**+" -func Successor(str string) string { - if str == "" { - return str - } - - var r rune - var i int - carry := ' ' - runes := []rune(str) - l := len(runes) - lastAlphanumeric := l - - for i = l - 1; i >= 0; i-- { - r = runes[i] - - if ('a' <= r && r <= 'y') || - ('A' <= r && r <= 'Y') || - ('0' <= r && r <= '8') { - runes[i]++ - carry = ' ' - lastAlphanumeric = i - break - } - - switch r { - case 'z': - runes[i] = 'a' - carry = 'a' - lastAlphanumeric = i - - case 'Z': - runes[i] = 'A' - carry = 'A' - lastAlphanumeric = i - - case '9': - runes[i] = '0' - carry = '0' - lastAlphanumeric = i - } - } - - // Needs to add one character for carry. - if i < 0 && carry != ' ' { - buf := &stringBuilder{} - buf.Grow(l + 4) // Reserve enough space for write. - - if lastAlphanumeric != 0 { - buf.WriteString(str[:lastAlphanumeric]) - } - - buf.WriteRune(carry) - - for _, r = range runes[lastAlphanumeric:] { - buf.WriteRune(r) - } - - return buf.String() - } - - // No alphanumeric character. Simply increase last rune's value. - if lastAlphanumeric == l { - runes[l-1]++ - } - - return string(runes) -} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go deleted file mode 100644 index f96e3870..00000000 --- a/vendor/github.com/huandu/xstrings/count.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -// Len returns str's utf8 rune length. -func Len(str string) int { - return utf8.RuneCountInString(str) -} - -// WordCount returns number of words in a string. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordCount(str string) int { - var r rune - var size, n int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - n++ - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - inWord = false - } - - str = str[size:] - } - - return n -} - -const minCJKCharacter = '\u3400' - -// Checks r is a letter but not CJK character. -func isAlphabet(r rune) bool { - if !unicode.IsLetter(r) { - return false - } - - switch { - // Quick check for non-CJK character. - case r < minCJKCharacter: - return true - - // Common CJK characters. 
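// The range below, U+4E00 through U+9FCC, covers the main CJK Unified Ideographs block;
// these runes are letters, but isAlphabet treats them as non-alphabetic so that
// WordCount and WordSplit do not count them as word characters.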
- case r >= '\u4E00' && r <= '\u9FCC': - return false - - // Rare CJK characters. - case r >= '\u3400' && r <= '\u4D85': - return false - - // Rare and historic CJK characters. - case r >= '\U00020000' && r <= '\U0002B81D': - return false - } - - return true -} - -// Width returns string width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func Width(str string) int { - var r rune - var size, n int - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - n += RuneWidth(r) - str = str[size:] - } - - return n -} - -// RuneWidth returns character width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func RuneWidth(r rune) int { - switch { - case r == utf8.RuneError || r < '\x20': - return 0 - - case '\x20' <= r && r < '\u2000': - return 1 - - case '\u2000' <= r && r < '\uFF61': - return 2 - - case '\uFF61' <= r && r < '\uFFA0': - return 1 - - case '\uFFA0' <= r: - return 2 - } - - return 0 -} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go deleted file mode 100644 index 1a6ef069..00000000 --- a/vendor/github.com/huandu/xstrings/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. -// See project home page for details. https://github.com/huandu/xstrings -// -// Package xstrings assumes all strings are encoded in utf8. -package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go deleted file mode 100644 index 8cd76c52..00000000 --- a/vendor/github.com/huandu/xstrings/format.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode/utf8" -) - -// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on -// current column and tabSize. -// The column number is reset to zero after each newline ('\n') occurring in the str. -// -// ExpandTabs uses RuneWidth to decide rune's width. -// For example, CJK characters will be treated as two characters. -// -// If tabSize <= 0, ExpandTabs panics with error. 
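// Each tab advances the column to the next multiple of tabSize, so a single tab
// expands to tabSize - column%tabSize spaces, as computed in the loop below.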
-// -// Samples: -// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" -// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" -// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" -func ExpandTabs(str string, tabSize int) string { - if tabSize <= 0 { - panic("tab size must be positive") - } - - var r rune - var i, size, column, expand int - var output *stringBuilder - - orig := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == '\t' { - expand = tabSize - column%tabSize - - if output == nil { - output = allocBuffer(orig, str) - } - - for i = 0; i < expand; i++ { - output.WriteRune(' ') - } - - column += expand - } else { - if r == '\n' { - column = 0 - } else { - column += RuneWidth(r) - } - - if output != nil { - output.WriteRune(r) - } - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} - -// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// LeftJustify("hello", 4, " ") => "hello" -// LeftJustify("hello", 10, " ") => "hello " -// LeftJustify("hello", 10, "123") => "hello12312" -func LeftJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - output.WriteString(str) - writePadString(output, pad, padLen, remains) - return output.String() -} - -// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// RightJustify("hello", 4, " ") => "hello" -// RightJustify("hello", 10, " ") => " hello" -// RightJustify("hello", 10, "123") => "12312hello" -func RightJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains) - output.WriteString(str) - return output.String() -} - -// Center returns a string with pad string at both side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. 
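// When the total padding is odd, the extra rune goes to the right-hand side:
// remains/2 runes are written on the left and (remains+1)/2 on the right.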
-// -// Samples: -// Center("hello", 4, " ") => "hello" -// Center("hello", 10, " ") => " hello " -// Center("hello", 10, "123") => "12hello123" -func Center(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains/2) - output.WriteString(str) - writePadString(output, pad, padLen, (remains+1)/2) - return output.String() -} - -func writePadString(output *stringBuilder, pad string, padLen, remains int) { - var r rune - var size int - - repeats := remains / padLen - - for i := 0; i < repeats; i++ { - output.WriteString(pad) - } - - remains = remains % padLen - - if remains != 0 { - for i := 0; i < remains; i++ { - r, size = utf8.DecodeRuneInString(pad) - output.WriteRune(r) - pad = pad[size:] - } - } -} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go deleted file mode 100644 index 64075f9b..00000000 --- a/vendor/github.com/huandu/xstrings/manipulate.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "strings" - "unicode/utf8" -) - -// Reverse a utf8 encoded string. -func Reverse(str string) string { - var size int - - tail := len(str) - buf := make([]byte, tail) - s := buf - - for len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - tail -= size - s = append(s[:tail], []byte(str[:size])...) - str = str[size:] - } - - return string(buf) -} - -// Slice a string by rune. -// -// Start must satisfy 0 <= start <= rune length. -// -// End can be positive, zero or negative. -// If end >= 0, start and end must satisfy start <= end <= rune length. -// If end < 0, it means slice to the end of string. -// -// Otherwise, Slice will panic as out of range. -func Slice(str string, start, end int) string { - var size, startPos, endPos int - - origin := str - - if start < 0 || end > len(str) || (end >= 0 && start > end) { - panic("out of range") - } - - if end >= 0 { - end -= start - } - - for start > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - start-- - startPos += size - str = str[size:] - } - - if end < 0 { - return origin[startPos:] - } - - endPos = startPos - - for end > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - end-- - endPos += size - str = str[size:] - } - - if len(str) == 0 && (start > 0 || end > 0) { - panic("out of range") - } - - return origin[startPos:endPos] -} - -// Partition splits a string by sep into three parts. -// The return value is a slice of strings with head, match and tail. -// -// If str contains sep, for example "hello" and "l", Partition returns -// "he", "l", "lo" -// -// If str doesn't contain sep, for example "hello" and "x", Partition returns -// "hello", "", "" -func Partition(str, sep string) (head, match, tail string) { - index := strings.Index(str, sep) - - if index == -1 { - head = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// LastPartition splits a string by last instance of sep into three parts. -// The return value is a slice of strings with head, match and tail. 
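// In practice head, match and tail are returned as three separate string values
// rather than as a slice.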
-// -// If str contains sep, for example "hello" and "l", LastPartition returns -// "hel", "l", "o" -// -// If str doesn't contain sep, for example "hello" and "x", LastPartition returns -// "", "", "hello" -func LastPartition(str, sep string) (head, match, tail string) { - index := strings.LastIndex(str, sep) - - if index == -1 { - tail = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// Insert src into dst at given rune index. -// Index is counted by runes instead of bytes. -// -// If index is out of range of dst, panic with out of range. -func Insert(dst, src string, index int) string { - return Slice(dst, 0, index) + src + Slice(dst, index, -1) -} - -// Scrub scrubs invalid utf8 bytes with repl string. -// Adjacent invalid bytes are replaced only once. -func Scrub(str, repl string) string { - var buf *stringBuilder - var r rune - var size, pos int - var hasError bool - - origin := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == utf8.RuneError { - if !hasError { - if buf == nil { - buf = &stringBuilder{} - } - - buf.WriteString(origin[:pos]) - hasError = true - } - } else if hasError { - hasError = false - buf.WriteString(repl) - - origin = origin[pos:] - pos = 0 - } - - pos += size - str = str[size:] - } - - if buf != nil { - buf.WriteString(origin) - return buf.String() - } - - // No invalid byte. - return origin -} - -// WordSplit splits a string into words. Returns a slice of words. -// If there is no word in a string, return nil. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordSplit(str string) []string { - var word string - var words []string - var r rune - var size, pos int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - word = str - pos = 0 - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - if inWord { - inWord = false - words = append(words, word[:pos]) - } - } - - pos += size - str = str[size:] - } - - if inWord { - words = append(words, word[:pos]) - } - - return words -} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go deleted file mode 100644 index bb0919d3..00000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder.go +++ /dev/null @@ -1,7 +0,0 @@ -//+build go1.10 - -package xstrings - -import "strings" - -type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go deleted file mode 100644 index dac389d1..00000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.10 - -package xstrings - -import "bytes" - -type stringBuilder struct { - bytes.Buffer -} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go deleted file mode 100644 index 42e694fb..00000000 --- a/vendor/github.com/huandu/xstrings/translate.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -type runeRangeMap struct { - FromLo rune // Lower bound of range map. 
- FromHi rune // An inclusive higher bound of range map. - ToLo rune - ToHi rune -} - -type runeDict struct { - Dict [unicode.MaxASCII + 1]rune -} - -type runeMap map[rune]rune - -// Translator can translate string with pre-compiled from and to patterns. -// If a from/to pattern pair needs to be used more than once, it's recommended -// to create a Translator and reuse it. -type Translator struct { - quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. - runeMap runeMap // Rune map for translation. - ranges []*runeRangeMap // Ranges of runes. - mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. - reverted bool // If to pattern is empty, all matched characters will be deleted. - hasPattern bool -} - -// NewTranslator creates new Translator through a from/to pattern pair. -func NewTranslator(from, to string) *Translator { - tr := &Translator{} - - if from == "" { - return tr - } - - reverted := from[0] == '^' - deletion := len(to) == 0 - - if reverted { - from = from[1:] - } - - var fromStart, fromEnd, fromRangeStep rune - var toStart, toEnd, toRangeStep rune - var fromRangeSize, toRangeSize rune - var singleRunes []rune - - // Update the to rune range. - updateRange := func() { - // No more rune to read in the to rune pattern. - if toEnd == utf8.RuneError { - return - } - - if toRangeStep == 0 { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) - return - } - - // Current range is not empty. Consume 1 rune from start. - if toStart != toEnd { - toStart += toRangeStep - return - } - - // No more rune. Repeat the last rune. - if to == "" { - toEnd = utf8.RuneError - return - } - - // Both start and end are used. Read two more runes from the to pattern. - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - - if deletion { - toStart = utf8.RuneError - toEnd = utf8.RuneError - } else { - // If from pattern is reverted, only the last rune in the to pattern will be used. - if reverted { - var size int - - for len(to) > 0 { - toStart, size = utf8.DecodeRuneInString(to) - to = to[size:] - } - - toEnd = utf8.RuneError - } else { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - } - - fromEnd = utf8.RuneError - - for len(from) > 0 { - from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) - - // fromStart is a single character. Just map it with a rune in the to pattern. - if fromRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - continue - } - - for toEnd != utf8.RuneError && fromStart != fromEnd { - // If mapped rune is a single character instead of a range, simply shift first - // rune in the range. - if toRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - fromStart += fromRangeStep - continue - } - - fromRangeSize = (fromEnd - fromStart) * fromRangeStep - toRangeSize = (toEnd - toStart) * toRangeStep - - // Not enough runes in the to pattern. Need to read more. - if fromRangeSize > toRangeSize { - fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) - fromStart += fromRangeStep - updateRange() - - // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered - // as a single rune. 
- if fromStart == fromEnd { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - } - - continue - } - - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) - updateRange() - break - } - - if fromStart == fromEnd { - fromEnd = utf8.RuneError - continue - } - - _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) - fromEnd = utf8.RuneError - } - - if fromEnd != utf8.RuneError { - tr.addRune(fromEnd, toStart, singleRunes) - } - - tr.reverted = reverted - tr.mappedRune = -1 - tr.hasPattern = true - - // Translate RuneError only if in deletion or reverted mode. - if deletion || reverted { - tr.mappedRune = toStart - } - - return tr -} - -func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { - if from <= unicode.MaxASCII { - if tr.quickDict == nil { - tr.quickDict = &runeDict{} - } - - tr.quickDict.Dict[from] = to - } else { - if tr.runeMap == nil { - tr.runeMap = make(runeMap) - } - - tr.runeMap[from] = to - } - - singleRunes = append(singleRunes, from) - return singleRunes -} - -func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { - var r rune - var rrm *runeRangeMap - - if fromLo < fromHi { - rrm = &runeRangeMap{ - FromLo: fromLo, - FromHi: fromHi, - ToLo: toLo, - ToHi: toHi, - } - } else { - rrm = &runeRangeMap{ - FromLo: fromHi, - FromHi: fromLo, - ToLo: toHi, - ToHi: toLo, - } - } - - // If there is any single rune conflicts with this rune range, clear single rune record. - for _, r = range singleRunes { - if rrm.FromLo <= r && r <= rrm.FromHi { - if r <= unicode.MaxASCII { - tr.quickDict.Dict[r] = 0 - } else { - delete(tr.runeMap, r) - } - } - } - - tr.ranges = append(tr.ranges, rrm) - return fromHi, toHi -} - -func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { - var r rune - var size int - - remaining = str - escaping := false - isRange := false - - for len(remaining) > 0 { - r, size = utf8.DecodeRuneInString(remaining) - remaining = remaining[size:] - - // Parse special characters. - if !escaping { - if r == '\\' { - escaping = true - continue - } - - if r == '-' { - // Ignore slash at beginning of string. - if last == utf8.RuneError { - continue - } - - start = last - isRange = true - continue - } - } - - escaping = false - - if last != utf8.RuneError { - // This is a range which start and end are the same. - // Considier it as a normal character. - if isRange && last == r { - isRange = false - continue - } - - start = last - end = r - - if isRange { - if start < end { - rangeStep = 1 - } else { - rangeStep = -1 - } - } - - return - } - - last = r - } - - start = last - end = utf8.RuneError - return -} - -// Translate str with a from/to pattern pair. -// -// See comment in Translate function for usage and samples. -func (tr *Translator) Translate(str string) string { - if !tr.hasPattern || str == "" { - return str - } - - var r rune - var size int - var needTr bool - - orig := str - - var output *stringBuilder - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - r, needTr = tr.TranslateRune(r) - - if needTr && output == nil { - output = allocBuffer(orig, str) - } - - if r != utf8.RuneError && output != nil { - output.WriteRune(r) - } - - str = str[size:] - } - - // No character is translated. - if output == nil { - return orig - } - - return output.String() -} - -// TranslateRune return translated rune and true if r matches the from pattern. 
-// If r doesn't match the pattern, original r is returned and translated is false. -func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { - switch { - case tr.quickDict != nil: - if r <= unicode.MaxASCII { - result = tr.quickDict.Dict[r] - - if result != 0 { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - } - - fallthrough - - case tr.runeMap != nil: - var ok bool - - if result, ok = tr.runeMap[r]; ok { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - - fallthrough - - default: - var rrm *runeRangeMap - ranges := tr.ranges - - for i := len(ranges) - 1; i >= 0; i-- { - rrm = ranges[i] - - if rrm.FromLo <= r && r <= rrm.FromHi { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - break - } - - if rrm.ToLo < rrm.ToHi { - result = rrm.ToLo + r - rrm.FromLo - } else if rrm.ToLo > rrm.ToHi { - // ToHi can be smaller than ToLo if range is from higher to lower. - result = rrm.ToLo - r + rrm.FromLo - } else { - result = rrm.ToLo - } - - break - } - } - } - - if tr.reverted { - if !translated { - result = tr.mappedRune - } - - translated = !translated - } - - if !translated { - result = r - } - - return -} - -// HasPattern returns true if Translator has one pattern at least. -func (tr *Translator) HasPattern() bool { - return tr.hasPattern -} - -// Translate str with the characters defined in from replaced by characters defined in to. -// -// From and to are patterns representing a set of characters. Pattern is defined as following. -// -// * Special characters -// * '-' means a range of runes, e.g. -// * "a-z" means all characters from 'a' to 'z' inclusive; -// * "z-a" means all characters from 'z' to 'a' inclusive. -// * '^' as first character means a set of all runes excepted listed, e.g. -// * "^a-z" means all characters except 'a' to 'z' inclusive. -// * '\' escapes special characters. -// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. -// -// Translate will try to find a 1:1 mapping from from to to. -// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. -// -// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. -// -// If the to pattern is an empty string, Translate works exactly the same as Delete. -// -// Samples: -// Translate("hello", "aeiou", "12345") => "h2ll4" -// Translate("hello", "a-z", "A-Z") => "HELLO" -// Translate("hello", "z-a", "a-z") => "svool" -// Translate("hello", "aeiou", "*") => "h*ll*" -// Translate("hello", "^l", "*") => "**ll*" -// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" -func Translate(str, from, to string) string { - tr := NewTranslator(from, to) - return tr.Translate(str) -} - -// Delete runes in str matching the pattern. -// Pattern is defined in Translate function. -// -// Samples: -// Delete("hello", "aeiou") => "hll" -// Delete("hello", "a-k") => "llo" -// Delete("hello", "^a-k") => "he" -func Delete(str, pattern string) string { - tr := NewTranslator(pattern, "") - return tr.Translate(str) -} - -// Count how many runes in str match the pattern. -// Pattern is defined in Translate function. 
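// Count compiles the pattern once with NewTranslator and then tests every rune
// through TranslateRune, so the scan itself is a single pass over str.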
-// -// Samples: -// Count("hello", "aeiou") => 3 -// Count("hello", "a-k") => 3 -// Count("hello", "^a-k") => 2 -func Count(str, pattern string) int { - if pattern == "" || str == "" { - return 0 - } - - var r rune - var size int - var matched bool - - tr := NewTranslator(pattern, "") - cnt := 0 - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if _, matched = tr.TranslateRune(r); matched { - cnt++ - } - } - - return cnt -} - -// Squeeze deletes adjacent repeated runes in str. -// If pattern is not empty, only runes matching the pattern will be squeezed. -// -// Samples: -// Squeeze("hello", "") => "helo" -// Squeeze("hello", "m-z") => "hello" -// Squeeze("hello world", " ") => "hello world" -func Squeeze(str, pattern string) string { - var last, r rune - var size int - var skipSqueeze, matched bool - var tr *Translator - var output *stringBuilder - - orig := str - last = -1 - - if len(pattern) > 0 { - tr = NewTranslator(pattern, "") - } - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - // Need to squeeze the str. - if last == r && !skipSqueeze { - if tr != nil { - if _, matched = tr.TranslateRune(r); !matched { - skipSqueeze = true - } - } - - if output == nil { - output = allocBuffer(orig, str) - } - - if skipSqueeze { - output.WriteRune(r) - } - } else { - if output != nil { - output.WriteRune(r) - } - - last = r - skipSqueeze = false - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af..00000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412..00000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43b..00000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... 
-after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b4490..00000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f9..00000000 --- a/vendor/github.com/imdario/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version. 
-- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? - - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. 
Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 68668029..00000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 4f028749..00000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,236 +0,0 @@ -# Mergo - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. 
- -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md deleted file mode 100644 index a5de61f7..00000000 --- a/vendor/github.com/imdario/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f9..00000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. 
All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? 
- - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index b50d5c2a..00000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 0ef9b213..00000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 0a721e2d..00000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE deleted file mode 100644 index 91b5cef3..00000000 --- a/vendor/github.com/mattn/go-runewidth/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
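For context on the mergo API whose vendored sources are deleted above, the sketch below is a minimal, illustrative use of `Merge` with the `WithOverride` and `WithAppendSlice` options, of `Map` (struct to `map[string]interface{}` with lower-camel-cased keys), and of the exported sentinel errors declared in the removed mergo.go. It is not part of any removed file; the `Settings` type, its field values, and the `main` wrapper are assumptions made purely for illustration, and the import path is that of the vendored copy being removed here.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/imdario/mergo"
)

// Settings is a hypothetical config struct used only for this sketch.
type Settings struct {
	Host string
	Tags []string
}

func main() {
	dst := Settings{Host: "localhost", Tags: []string{"a"}}
	src := Settings{Host: "example.com", Tags: []string{"b"}}

	// WithOverride replaces non-empty dst fields with non-empty src fields;
	// WithAppendSlice appends src slices onto dst slices instead of replacing them.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst) // {example.com [a b]}

	// Map goes from struct to map[string]interface{} (keys lower-camel cased),
	// or the other way around, under the same restrictions as Merge.
	out := map[string]interface{}{}
	if err := mergo.Map(&out, src); err != nil {
		panic(err)
	}
	fmt.Println(out["host"]) // example.com

	// Mismatched argument types surface as the exported sentinel errors.
	err := mergo.Merge(&dst, struct{ A int }{1})
	fmt.Println(errors.Is(err, mergo.ErrDifferentArgumentsTypes)) // true
}
```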
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md deleted file mode 100644 index 5e2cfd98..00000000 --- a/vendor/github.com/mattn/go-runewidth/README.md +++ /dev/null @@ -1,27 +0,0 @@ -go-runewidth -============ - -[![Build Status](https://github.com/mattn/go-runewidth/workflows/test/badge.svg?branch=master)](https://github.com/mattn/go-runewidth/actions?query=workflow%3Atest) -[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth) -[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) -[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) - -Provides functions to get fixed width of the character or string. - -Usage ------ - -```go -runewidth.StringWidth("つのだ☆HIRO") == 12 -``` - - -Author ------- - -Yasuhiro Matsumoto - -License -------- - -under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go deleted file mode 100644 index 7dfbb3be..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ /dev/null @@ -1,358 +0,0 @@ -package runewidth - -import ( - "os" - "strings" - - "github.com/rivo/uniseg" -) - -//go:generate go run script/generate.go - -var ( - // EastAsianWidth will be set true if the current locale is CJK - EastAsianWidth bool - - // StrictEmojiNeutral should be set false if handle broken fonts - StrictEmojiNeutral bool = true - - // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{ - EastAsianWidth: false, - StrictEmojiNeutral: true, - } -) - -func init() { - handleEnv() -} - -func handleEnv() { - env := os.Getenv("RUNEWIDTH_EASTASIAN") - if env == "" { - EastAsianWidth = IsEastAsian() - } else { - EastAsianWidth = env == "1" - } - // update DefaultCondition - if DefaultCondition.EastAsianWidth != EastAsianWidth { - DefaultCondition.EastAsianWidth = EastAsianWidth - if len(DefaultCondition.combinedLut) > 0 { - DefaultCondition.combinedLut = DefaultCondition.combinedLut[:0] - CreateLUT() - } - } -} - -type interval struct { - first rune - last rune -} - -type table []interval - -func inTables(r rune, ts ...table) bool { - for _, t := range ts { - if inTable(r, t) { - return true - } - } - return false -} - -func inTable(r rune, t table) bool { - if r < t[0].first { - return false - } - - bot := 0 - top := len(t) - 1 - for top >= bot { - mid := (bot + top) >> 1 - - switch { - case t[mid].last < r: - bot = mid + 1 - case t[mid].first > r: - top = mid - 1 - default: - return true - } - } - - return false -} - -var private = table{ - {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, -} - -var nonprint = table{ - {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, - {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, - {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, -} - -// Condition have flag EastAsianWidth whether the current locale is CJK or not. -type Condition struct { - combinedLut []byte - EastAsianWidth bool - StrictEmojiNeutral bool -} - -// NewCondition return new instance of Condition which is current locale. 
-func NewCondition() *Condition { - return &Condition{ - EastAsianWidth: EastAsianWidth, - StrictEmojiNeutral: StrictEmojiNeutral, - } -} - -// RuneWidth returns the number of cells in r. -// See http://www.unicode.org/reports/tr11/ -func (c *Condition) RuneWidth(r rune) int { - if r < 0 || r > 0x10FFFF { - return 0 - } - if len(c.combinedLut) > 0 { - return int(c.combinedLut[r>>1]>>(uint(r&1)*4)) & 3 - } - // optimized version, verified by TestRuneWidthChecksums() - if !c.EastAsianWidth { - switch { - case r < 0x20: - return 0 - case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint - return 0 - case r < 0x300: - return 1 - case inTable(r, narrow): - return 1 - case inTables(r, nonprint, combining): - return 0 - case inTable(r, doublewidth): - return 2 - default: - return 1 - } - } else { - switch { - case inTables(r, nonprint, combining): - return 0 - case inTable(r, narrow): - return 1 - case inTables(r, ambiguous, doublewidth): - return 2 - case !c.StrictEmojiNeutral && inTables(r, ambiguous, emoji, narrow): - return 2 - default: - return 1 - } - } -} - -// CreateLUT will create an in-memory lookup table of 557056 bytes for faster operation. -// This should not be called concurrently with other operations on c. -// If options in c is changed, CreateLUT should be called again. -func (c *Condition) CreateLUT() { - const max = 0x110000 - lut := c.combinedLut - if len(c.combinedLut) != 0 { - // Remove so we don't use it. - c.combinedLut = nil - } else { - lut = make([]byte, max/2) - } - for i := range lut { - i32 := int32(i * 2) - x0 := c.RuneWidth(i32) - x1 := c.RuneWidth(i32 + 1) - lut[i] = uint8(x0) | uint8(x1)<<4 - } - c.combinedLut = lut -} - -// StringWidth return width as you can see -func (c *Condition) StringWidth(s string) (width int) { - g := uniseg.NewGraphemes(s) - for g.Next() { - var chWidth int - for _, r := range g.Runes() { - chWidth = c.RuneWidth(r) - if chWidth > 0 { - break // Our best guess at this point is to use the width of the first non-zero-width rune. - } - } - width += chWidth - } - return -} - -// Truncate return string truncated with w cells -func (c *Condition) Truncate(s string, w int, tail string) string { - if c.StringWidth(s) <= w { - return s - } - w -= c.StringWidth(tail) - var width int - pos := len(s) - g := uniseg.NewGraphemes(s) - for g.Next() { - var chWidth int - for _, r := range g.Runes() { - chWidth = c.RuneWidth(r) - if chWidth > 0 { - break // See StringWidth() for details. - } - } - if width+chWidth > w { - pos, _ = g.Positions() - break - } - width += chWidth - } - return s[:pos] + tail -} - -// TruncateLeft cuts w cells from the beginning of the `s`. -func (c *Condition) TruncateLeft(s string, w int, prefix string) string { - if c.StringWidth(s) <= w { - return prefix - } - - var width int - pos := len(s) - - g := uniseg.NewGraphemes(s) - for g.Next() { - var chWidth int - for _, r := range g.Runes() { - chWidth = c.RuneWidth(r) - if chWidth > 0 { - break // See StringWidth() for details. 
- } - } - - if width+chWidth > w { - if width < w { - _, pos = g.Positions() - prefix += strings.Repeat(" ", width+chWidth-w) - } else { - pos, _ = g.Positions() - } - - break - } - - width += chWidth - } - - return prefix + s[pos:] -} - -// Wrap return string wrapped with w cells -func (c *Condition) Wrap(s string, w int) string { - width := 0 - out := "" - for _, r := range s { - cw := c.RuneWidth(r) - if r == '\n' { - out += string(r) - width = 0 - continue - } else if width+cw > w { - out += "\n" - width = 0 - out += string(r) - width += cw - continue - } - out += string(r) - width += cw - } - return out -} - -// FillLeft return string filled in left by spaces in w cells -func (c *Condition) FillLeft(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return string(b) + s - } - return s -} - -// FillRight return string filled in left by spaces in w cells -func (c *Condition) FillRight(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return s + string(b) - } - return s -} - -// RuneWidth returns the number of cells in r. -// See http://www.unicode.org/reports/tr11/ -func RuneWidth(r rune) int { - return DefaultCondition.RuneWidth(r) -} - -// IsAmbiguousWidth returns whether is ambiguous width or not. -func IsAmbiguousWidth(r rune) bool { - return inTables(r, private, ambiguous) -} - -// IsNeutralWidth returns whether is neutral width or not. -func IsNeutralWidth(r rune) bool { - return inTable(r, neutral) -} - -// StringWidth return width as you can see -func StringWidth(s string) (width int) { - return DefaultCondition.StringWidth(s) -} - -// Truncate return string truncated with w cells -func Truncate(s string, w int, tail string) string { - return DefaultCondition.Truncate(s, w, tail) -} - -// TruncateLeft cuts w cells from the beginning of the `s`. -func TruncateLeft(s string, w int, prefix string) string { - return DefaultCondition.TruncateLeft(s, w, prefix) -} - -// Wrap return string wrapped with w cells -func Wrap(s string, w int) string { - return DefaultCondition.Wrap(s, w) -} - -// FillLeft return string filled in left by spaces in w cells -func FillLeft(s string, w int) string { - return DefaultCondition.FillLeft(s, w) -} - -// FillRight return string filled in left by spaces in w cells -func FillRight(s string, w int) string { - return DefaultCondition.FillRight(s, w) -} - -// CreateLUT will create an in-memory lookup table of 557055 bytes for faster operation. -// This should not be called concurrently with other operations. 
-func CreateLUT() { - if len(DefaultCondition.combinedLut) > 0 { - return - } - DefaultCondition.CreateLUT() -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go deleted file mode 100644 index 84b6528d..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build appengine -// +build appengine - -package runewidth - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - return false -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go deleted file mode 100644 index c2abbc2d..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build js && !appengine -// +build js,!appengine - -package runewidth - -func IsEastAsian() bool { - // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. - return false -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go deleted file mode 100644 index 5a31d738..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !windows && !js && !appengine -// +build !windows,!js,!appengine - -package runewidth - -import ( - "os" - "regexp" - "strings" -) - -var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) - -var mblenTable = map[string]int{ - "utf-8": 6, - "utf8": 6, - "jis": 8, - "eucjp": 3, - "euckr": 2, - "euccn": 2, - "sjis": 2, - "cp932": 2, - "cp51932": 2, - "cp936": 2, - "cp949": 2, - "cp950": 2, - "big5": 2, - "gbk": 2, - "gb2312": 2, -} - -func isEastAsian(locale string) bool { - charset := strings.ToLower(locale) - r := reLoc.FindStringSubmatch(locale) - if len(r) == 2 { - charset = strings.ToLower(r[1]) - } - - if strings.HasSuffix(charset, "@cjk_narrow") { - return false - } - - for pos, b := range []byte(charset) { - if b == '@' { - charset = charset[:pos] - break - } - } - max := 1 - if m, ok := mblenTable[charset]; ok { - max = m - } - if max > 1 && (charset[0] != 'u' || - strings.HasPrefix(locale, "ja") || - strings.HasPrefix(locale, "ko") || - strings.HasPrefix(locale, "zh")) { - return true - } - return false -} - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - locale := os.Getenv("LC_ALL") - if locale == "" { - locale = os.Getenv("LC_CTYPE") - } - if locale == "" { - locale = os.Getenv("LANG") - } - - // ignore C locale - if locale == "POSIX" || locale == "C" { - return false - } - if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') { - return false - } - - return isEastAsian(locale) -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go deleted file mode 100644 index e5d890c2..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ /dev/null @@ -1,439 +0,0 @@ -// Code generated by script/generate.go. DO NOT EDIT. 
- -package runewidth - -var combining = table{ - {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, - {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, - {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, - {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, - {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, - {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, - {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, - {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, - {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, - {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, - {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, - {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, - {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, - {0x1E8D0, 0x1E8D6}, -} - -var doublewidth = table{ - {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, - {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, - {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, - {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, - {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, - {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, - {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, - {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, - {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, - {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, - {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, - {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, - {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, - {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, - {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, - {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, - {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, - {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, - {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, - {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, - {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, - {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, - {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, - {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, - {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, - {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, - {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, - {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, - {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, - {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, - {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, - {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, - {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, - {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, - {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, - {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6}, - {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, -} - -var ambiguous = table{ - {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, - {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, - {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, - {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, - {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, - {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, 
{0x00F7, 0x00FA}, - {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, - {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, - {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, - {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, - {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, - {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, - {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, - {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, - {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, - {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, - {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, - {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, - {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, - {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, - {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, - {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, - {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, - {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, - {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, - {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, - {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, - {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, - {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, - {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, - {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, - {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, - {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, - {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, - {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, - {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, - {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, - {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, - {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, - {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, - {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, - {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, - {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, - {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, - {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, - {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, - {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, - {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, - {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, - {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, - {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, - {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, - {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, - {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, - {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, - {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, - {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, - {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, - {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, - {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, -} -var narrow = table{ - {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6}, - {0x00AC, 0x00AC}, {0x00AF, 0x00AF}, {0x27E6, 0x27ED}, - {0x2985, 0x2986}, -} - -var neutral = table{ - {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, - {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, - {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, - {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, - {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, - {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 
0x00FD}, - {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, - {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, - {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, - {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, - {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, - {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, - {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, - {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, - {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, - {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, - {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, - {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, - {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, - {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, - {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, - {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, - {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, - {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, - {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, - {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, - {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, - {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, - {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, - {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, - {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, - {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, - {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, - {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, - {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, - {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, - {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, - {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, - {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, - {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, - {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, - {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, - {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, - {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, - {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, - {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, - {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, - {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, - {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, - {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, - {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, - {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, - {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, - {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, - {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, - {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, - {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, - {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, - {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, - {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, - {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, - {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, - {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, - {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, - {0x0EC6, 
0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, - {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, - {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, - {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, - {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, - {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, - {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, - {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, - {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, - {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, - {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, - {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, - {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, - {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, - {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, - {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, - {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, - {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, - {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, - {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, - {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, - {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, - {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, - {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, - {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, - {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, - {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, - {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, - {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, - {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, - {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, - {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, - {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, - {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, - {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, - {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, - {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, - {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, - {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, - {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, - {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, - {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, - {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, - {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, - {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, - {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, - {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, - {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, - {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, - {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, - {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, - {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, - {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, - {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, - {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, - {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, - {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, - {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, - {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, - {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, - {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, - {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, - {0x261D, 0x261D}, {0x261F, 0x263F}, 
{0x2641, 0x2641}, - {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, - {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, - {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, - {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, - {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, - {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, - {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D}, - {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775}, - {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, - {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, - {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, - {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, - {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, - {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, - {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, - {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, - {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, - {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, - {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, - {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, - {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, - {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, - {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, - {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, - {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, - {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, - {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, - {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, - {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, - {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, - {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, - {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, - {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, - {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, - {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, - {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, - {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, - {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, - {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, - {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, - {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, - {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, - {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, - {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, - {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, - {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, - {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, - {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, - {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, - {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, - {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, - {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, - {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, - {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, - {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, - {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, - {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, - {0x1107F, 
0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, - {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, - {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, - {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, - {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, - {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, - {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, - {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, - {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, - {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, - {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, - {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, - {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, - {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A}, - {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, - {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, - {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, - {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, - {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, - {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, - {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, - {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, - {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, - {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, - {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, - {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, - {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, - {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, - {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, - {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, - {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, - {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, - {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, - {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, - {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, - {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, - {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, - {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, - {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, - {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, - {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, - {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, - {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, - {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, - {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, - {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, - {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, - {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, - {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, - {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, - {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, - {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, - {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, - {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, - {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, - {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, - {0x1EE47, 0x1EE47}, {0x1EE49, 
0x1EE49}, {0x1EE4B, 0x1EE4B}, - {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, - {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, - {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, - {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, - {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, - {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, - {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, - {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, - {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, - {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, - {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, - {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, - {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, - {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, - {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, - {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, - {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, - {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, - {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, - {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, - {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, - {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, - {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, - {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, - {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, -} - -var emoji = table{ - {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, - {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, - {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, - {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, - {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, - {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, - {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, - {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, - {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, - {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, - {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, - {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, - {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, - {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, - {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, - {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, - {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, - {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, - {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, - {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, - {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F}, - {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, - {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, - {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, - {0x1FC00, 0x1FFFD}, -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go deleted file mode 100644 index 5f987a31..00000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows && !appengine -// +build windows,!appengine - -package runewidth - -import ( - "syscall" -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32") - procGetConsoleOutputCP = 
kernel32.NewProc("GetConsoleOutputCP") -) - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - r1, _, _ := procGetConsoleOutputCP.Call() - if r1 == 0 { - return false - } - - switch int(r1) { - case 932, 51932, 936, 949, 950: - return true - } - - return false -} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE deleted file mode 100644 index 22985159..00000000 --- a/vendor/github.com/mitchellh/copystructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md deleted file mode 100644 index f0fbd2e5..00000000 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go deleted file mode 100644 index db6a6aa1..00000000 --- a/vendor/github.com/mitchellh/copystructure/copier_time.go +++ /dev/null @@ -1,15 +0,0 @@ -package copystructure - -import ( - "reflect" - "time" -) - -func init() { - Copiers[reflect.TypeOf(time.Time{})] = timeCopier -} - -func timeCopier(v interface{}) (interface{}, error) { - // Just... copy it. - return v.(time.Time), nil -} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go deleted file mode 100644 index 8089e667..00000000 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ /dev/null @@ -1,631 +0,0 @@ -package copystructure - -import ( - "errors" - "reflect" - "sync" - - "github.com/mitchellh/reflectwalk" -) - -const tagKey = "copy" - -// Copy returns a deep copy of v. -// -// Copy is unable to copy unexported fields in a struct (lowercase field names). 
-// Unexported fields can't be reflected by the Go runtime and therefore -// copystructure can't perform any data copies. -// -// For structs, copy behavior can be controlled with struct tags. For example: -// -// struct { -// Name string -// Data *bytes.Buffer `copy:"shallow"` -// } -// -// The available tag values are: -// -// * "ignore" - The field will be ignored, effectively resulting in it being -// assigned the zero value in the copy. -// -// * "shallow" - The field will be be shallow copied. This means that references -// values such as pointers, maps, slices, etc. will be directly assigned -// versus deep copied. -// -func Copy(v interface{}) (interface{}, error) { - return Config{}.Copy(v) -} - -// CopierFunc is a function that knows how to deep copy a specific type. -// Register these globally with the Copiers variable. -type CopierFunc func(interface{}) (interface{}, error) - -// Copiers is a map of types that behave specially when they are copied. -// If a type is found in this map while deep copying, this function -// will be called to copy it instead of attempting to copy all fields. -// -// The key should be the type, obtained using: reflect.TypeOf(value with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) - -// ShallowCopiers is a map of pointer types that behave specially -// when they are copied. If a type is found in this map while deep -// copying, the pointer value will be shallow copied and not walked -// into. -// -// The key should be the type, obtained using: reflect.TypeOf(value -// with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) - -// Must is a helper that wraps a call to a function returning -// (interface{}, error) and panics if the error is non-nil. It is intended -// for use in variable initializations and should only be used when a copy -// error should be a crashing case. -func Must(v interface{}, err error) interface{} { - if err != nil { - panic("copy error: " + err.Error()) - } - - return v -} - -var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") - -type Config struct { - // Lock any types that are a sync.Locker and are not a mutex while copying. - // If there is an RLocker method, use that to get the sync.Locker. - Lock bool - - // Copiers is a map of types associated with a CopierFunc. Use the global - // Copiers map if this is nil. - Copiers map[reflect.Type]CopierFunc - - // ShallowCopiers is a map of pointer types that when they are - // shallow copied no matter where they are encountered. Use the - // global ShallowCopiers if this is nil. 
- ShallowCopiers map[reflect.Type]struct{} -} - -func (c Config) Copy(v interface{}) (interface{}, error) { - if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { - return nil, errPointerRequired - } - - w := new(walker) - if c.Lock { - w.useLocks = true - } - - if c.Copiers == nil { - c.Copiers = Copiers - } - w.copiers = c.Copiers - - if c.ShallowCopiers == nil { - c.ShallowCopiers = ShallowCopiers - } - w.shallowCopiers = c.ShallowCopiers - - err := reflectwalk.Walk(v, w) - if err != nil { - return nil, err - } - - // Get the result. If the result is nil, then we want to turn it - // into a typed nil if we can. - result := w.Result - if result == nil { - val := reflect.ValueOf(v) - result = reflect.Indirect(reflect.New(val.Type())).Interface() - } - - return result, nil -} - -// Return the key used to index interfaces types we've seen. Store the number -// of pointers in the upper 32bits, and the depth in the lower 32bits. This is -// easy to calculate, easy to match a key with our current depth, and we don't -// need to deal with initializing and cleaning up nested maps or slices. -func ifaceKey(pointers, depth int) uint64 { - return uint64(pointers)<<32 | uint64(depth) -} - -type walker struct { - Result interface{} - - copiers map[reflect.Type]CopierFunc - shallowCopiers map[reflect.Type]struct{} - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value - - // This stores the number of pointers we've walked over, indexed by depth. - ps []int - - // If an interface is indirected by a pointer, we need to know the type of - // interface to create when creating the new value. Store the interface - // types here, indexed by both the walk depth and the number of pointers - // already seen at that depth. Use ifaceKey to calculate the proper uint64 - // value. - ifaceTypes map[uint64]reflect.Type - - // any locks we've taken, indexed by depth - locks []sync.Locker - // take locks while walking the structure - useLocks bool -} - -func (w *walker) Enter(l reflectwalk.Location) error { - w.depth++ - - // ensure we have enough elements to index via w.depth - for w.depth >= len(w.locks) { - w.locks = append(w.locks, nil) - } - - for len(w.ps) < w.depth+1 { - w.ps = append(w.ps, 0) - } - - return nil -} - -func (w *walker) Exit(l reflectwalk.Location) error { - locker := w.locks[w.depth] - w.locks[w.depth] = nil - if locker != nil { - defer locker.Unlock() - } - - // clear out pointers and interfaces as we exit the stack - w.ps[w.depth] = 0 - - for k := range w.ifaceTypes { - mask := uint64(^uint32(0)) - if k&mask == uint64(w.depth) { - delete(w.ifaceTypes, k) - } - } - - w.depth-- - if w.ignoreDepth > w.depth { - w.ignoreDepth = 0 - } - - if w.ignoring() { - return nil - } - - switch l { - case reflectwalk.Array: - fallthrough - case reflectwalk.Map: - fallthrough - case reflectwalk.Slice: - w.replacePointerMaybe() - - // Pop map off our container - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - // Pop off the key and value - mv := w.valPop() - mk := w.valPop() - m := w.cs[len(w.cs)-1] - - // If mv is the zero value, SetMapIndex deletes the key form the map, - // or in this case never adds it. We need to create a properly typed - // zero value so that this key can be set. 
- if !mv.IsValid() { - mv = reflect.Zero(m.Elem().Type().Elem()) - } - m.Elem().SetMapIndex(mk, mv) - case reflectwalk.ArrayElem: - // Pop off the value and the index and set it on the array - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - a := w.cs[len(w.cs)-1] - ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call - if ae.CanSet() { - ae.Set(v) - } - } - case reflectwalk.SliceElem: - // Pop off the value and the index and set it on the slice - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - se := s.Elem().Index(i) - if se.CanSet() { - se.Set(v) - } - } - case reflectwalk.Struct: - w.replacePointerMaybe() - - // Remove the struct from the container stack - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.StructField: - // Pop off the value and the field - v := w.valPop() - f := w.valPop().Interface().(reflect.StructField) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - - if sf.CanSet() { - sf.Set(v) - } - } - case reflectwalk.WalkLoc: - // Clear out the slices for GC - w.cs = nil - w.vals = nil - } - - return nil -} - -func (w *walker) Map(m reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(m) - - // Create the map. If the map itself is nil, then just make a nil map - var newMap reflect.Value - if m.IsNil() { - newMap = reflect.New(m.Type()) - } else { - newMap = wrapPtr(reflect.MakeMap(m.Type())) - } - - w.cs = append(w.cs, newMap) - w.valPush(newMap) - return nil -} - -func (w *walker) MapElem(m, k, v reflect.Value) error { - return nil -} - -func (w *walker) PointerEnter(v bool) error { - if v { - w.ps[w.depth]++ - } - return nil -} - -func (w *walker) PointerExit(v bool) error { - if v { - w.ps[w.depth]-- - } - return nil -} - -func (w *walker) Pointer(v reflect.Value) error { - if _, ok := w.shallowCopiers[v.Type()]; ok { - // Shallow copy this value. Use the same logic as primitive, then - // return skip. - if err := w.Primitive(v); err != nil { - return err - } - - return reflectwalk.SkipEntry - } - - return nil -} - -func (w *walker) Interface(v reflect.Value) error { - if !v.IsValid() { - return nil - } - if w.ifaceTypes == nil { - w.ifaceTypes = make(map[uint64]reflect.Type) - } - - w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() - return nil -} - -func (w *walker) Primitive(v reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(v) - - // IsValid verifies the v is non-zero and CanInterface verifies - // that we're allowed to read this value (unexported fields). - var newV reflect.Value - if v.IsValid() && v.CanInterface() { - newV = reflect.New(v.Type()) - newV.Elem().Set(v) - } - - w.valPush(newV) - w.replacePointerMaybe() - return nil -} - -func (w *walker) Slice(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var newS reflect.Value - if s.IsNil() { - newS = reflect.New(s.Type()) - } else { - newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) - } - - w.cs = append(w.cs, newS) - w.valPush(newS) - return nil -} - -func (w *walker) SliceElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the slice here because elem might still be - // arbitrarily complex. Just record the index and continue on. 
- w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Array(a reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(a) - - newA := reflect.New(a.Type()) - - w.cs = append(w.cs, newA) - w.valPush(newA) - return nil -} - -func (w *walker) ArrayElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the array here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Struct(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var v reflect.Value - if c, ok := w.copiers[s.Type()]; ok { - // We have a Copier for this struct, so we use that copier to - // get the copy, and we ignore anything deeper than this. - w.ignoreDepth = w.depth - - dup, err := c(s.Interface()) - if err != nil { - return err - } - - // We need to put a pointer to the value on the value stack, - // so allocate a new pointer and set it. - v = reflect.New(s.Type()) - reflect.Indirect(v).Set(reflect.ValueOf(dup)) - } else { - // No copier, we copy ourselves and allow reflectwalk to guide - // us deeper into the structure for copying. - v = reflect.New(s.Type()) - } - - // Push the value onto the value stack for setting the struct field, - // and add the struct itself to the containers stack in case we walk - // deeper so that its own fields can be modified. - w.valPush(v) - w.cs = append(w.cs, v) - - return nil -} - -func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { - if w.ignoring() { - return nil - } - - // If PkgPath is non-empty, this is a private (unexported) field. - // We do not set this unexported since the Go runtime doesn't allow us. - if f.PkgPath != "" { - return reflectwalk.SkipEntry - } - - switch f.Tag.Get(tagKey) { - case "shallow": - // If we're shallow copying then assign the value directly to the - // struct and skip the entry. - if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - if sf.CanSet() { - sf.Set(v) - } - } - - return reflectwalk.SkipEntry - - case "ignore": - // Do nothing - return reflectwalk.SkipEntry - } - - // Push the field onto the stack, we'll handle it when we exit - // the struct field in Exit... - w.valPush(reflect.ValueOf(f)) - - return nil -} - -// ignore causes the walker to ignore any more values until we exit this on -func (w *walker) ignore() { - w.ignoreDepth = w.depth -} - -func (w *walker) ignoring() bool { - return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth -} - -func (w *walker) pointerPeek() bool { - return w.ps[w.depth] > 0 -} - -func (w *walker) valPop() reflect.Value { - result := w.vals[len(w.vals)-1] - w.vals = w.vals[:len(w.vals)-1] - - // If we're out of values, that means we popped everything off. In - // this case, we reset the result so the next pushed value becomes - // the result. - if len(w.vals) == 0 { - w.Result = nil - } - - return result -} - -func (w *walker) valPush(v reflect.Value) { - w.vals = append(w.vals, v) - - // If we haven't set the result yet, then this is the result since - // it is the first (outermost) value we're seeing. - if w.Result == nil && v.IsValid() { - w.Result = v.Interface() - } -} - -func (w *walker) replacePointerMaybe() { - // Determine the last pointer value. If it is NOT a pointer, then - // we need to push that onto the stack. 
- if !w.pointerPeek() { - w.valPush(reflect.Indirect(w.valPop())) - return - } - - v := w.valPop() - - // If the expected type is a pointer to an interface of any depth, - // such as *interface{}, **interface{}, etc., then we need to convert - // the value "v" from *CONCRETE to *interface{} so types match for - // Set. - // - // Example if v is type *Foo where Foo is a struct, v would become - // *interface{} instead. This only happens if we have an interface expectation - // at this depth. - // - // For more info, see GH-16 - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { - y := reflect.New(iType) // Create *interface{} - y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) - v = y // v is now typed *interface{} (where *v = Foo) - } - - for i := 1; i < w.ps[w.depth]; i++ { - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { - iface := reflect.New(iType).Elem() - iface.Set(v) - v = iface - } - - p := reflect.New(v.Type()) - p.Elem().Set(v) - v = p - } - - w.valPush(v) -} - -// if this value is a Locker, lock it and add it to the locks slice -func (w *walker) lock(v reflect.Value) { - if !w.useLocks { - return - } - - if !v.IsValid() || !v.CanInterface() { - return - } - - type rlocker interface { - RLocker() sync.Locker - } - - var locker sync.Locker - - // We can't call Interface() on a value directly, since that requires - // a copy. This is OK, since the pointer to a value which is a sync.Locker - // is also a sync.Locker. - if v.Kind() == reflect.Ptr { - switch l := v.Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } else if v.CanAddr() { - switch l := v.Addr().Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } - - // still no callable locker - if locker == nil { - return - } - - // don't lock a mutex directly - switch locker.(type) { - case *sync.Mutex, *sync.RWMutex: - return - } - - locker.Lock() - w.locks[w.depth] = locker -} - -// wrapPtr is a helper that takes v and always make it *v. 
copystructure -// stores things internally as pointers until the last moment before unwrapping -func wrapPtr(v reflect.Value) reflect.Value { - if !v.IsValid() { - return v - } - vPtr := reflect.New(v.Type()) - vPtr.Elem().Set(v) - return vPtr -} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md deleted file mode 100644 index ac82cd2e..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# reflectwalk - -reflectwalk is a Go library for "walking" a value in Go using reflection, -in the same way a directory tree can be "walked" on the filesystem. Walking -a complex structure can allow you to do manipulations on unknown structures -such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go deleted file mode 100644 index 6a7f1761..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ /dev/null @@ -1,19 +0,0 @@ -package reflectwalk - -//go:generate stringer -type=Location location.go - -type Location uint - -const ( - None Location = iota - Map - MapKey - MapValue - Slice - SliceElem - Array - ArrayElem - Struct - StructField - WalkLoc -) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go deleted file mode 100644 index 70760cf4..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
- -package reflectwalk - -import "fmt" - -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" - -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} - -func (i Location) String() string { - if i >= Location(len(_Location_index)-1) { - return fmt.Sprintf("Location(%d)", i) - } - return _Location_name[_Location_index[i]:_Location_index[i+1]] -} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go deleted file mode 100644 index 7fee7b05..00000000 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ /dev/null @@ -1,420 +0,0 @@ -// reflectwalk is a package that allows you to "walk" complex structures -// similar to how you may "walk" a filesystem: visiting every element one -// by one and calling callback functions allowing you to handle and manipulate -// those elements. -package reflectwalk - -import ( - "errors" - "reflect" -) - -// PrimitiveWalker implementations are able to handle primitive values -// within complex structures. Primitive values are numbers, strings, -// booleans, funcs, chans. -// -// These primitive values are often members of more complex -// structures (slices, maps, etc.) that are walkable by other interfaces. -type PrimitiveWalker interface { - Primitive(reflect.Value) error -} - -// InterfaceWalker implementations are able to handle interface values as they -// are encountered during the walk. -type InterfaceWalker interface { - Interface(reflect.Value) error -} - -// MapWalker implementations are able to handle individual elements -// found within a map structure. -type MapWalker interface { - Map(m reflect.Value) error - MapElem(m, k, v reflect.Value) error -} - -// SliceWalker implementations are able to handle slice elements found -// within complex structures. -type SliceWalker interface { - Slice(reflect.Value) error - SliceElem(int, reflect.Value) error -} - -// ArrayWalker implementations are able to handle array elements found -// within complex structures. -type ArrayWalker interface { - Array(reflect.Value) error - ArrayElem(int, reflect.Value) error -} - -// StructWalker is an interface that has methods that are called for -// structs when a Walk is done. -type StructWalker interface { - Struct(reflect.Value) error - StructField(reflect.StructField, reflect.Value) error -} - -// EnterExitWalker implementations are notified before and after -// they walk deeper into complex structures (into struct fields, -// into slice elements, etc.) -type EnterExitWalker interface { - Enter(Location) error - Exit(Location) error -} - -// PointerWalker implementations are notified when the value they're -// walking is a pointer or not. Pointer is called for _every_ value whether -// it is a pointer or not. -type PointerWalker interface { - PointerEnter(bool) error - PointerExit(bool) error -} - -// PointerValueWalker implementations are notified with the value of -// a particular pointer when a pointer is walked. Pointer is called -// right before PointerEnter. -type PointerValueWalker interface { - Pointer(reflect.Value) error -} - -// SkipEntry can be returned from walk functions to skip walking -// the value of this field. 
This is only valid in the following functions: -// -// - Struct: skips all fields from being walked -// - StructField: skips walking the struct value -// -var SkipEntry = errors.New("skip this entry") - -// Walk takes an arbitrary value and an interface and traverses the -// value, calling callbacks on the interface if they are supported. -// The interface should implement one or more of the walker interfaces -// in this package, such as PrimitiveWalker, StructWalker, etc. -func Walk(data, walker interface{}) (err error) { - v := reflect.ValueOf(data) - ew, ok := walker.(EnterExitWalker) - if ok { - err = ew.Enter(WalkLoc) - } - - if err == nil { - err = walk(v, walker) - } - - if ok && err == nil { - err = ew.Exit(WalkLoc) - } - - return -} - -func walk(v reflect.Value, w interface{}) (err error) { - // Determine if we're receiving a pointer and if so notify the walker. - // The logic here is convoluted but very important (tests will fail if - // almost any part is changed). I will try to explain here. - // - // First, we check if the value is an interface, if so, we really need - // to check the interface's VALUE to see whether it is a pointer. - // - // Check whether the value is then a pointer. If so, then set pointer - // to true to notify the user. - // - // If we still have a pointer or an interface after the indirections, then - // we unwrap another level - // - // At this time, we also set "v" to be the dereferenced value. This is - // because once we've unwrapped the pointer we want to use that value. - pointer := false - pointerV := v - - for { - if pointerV.Kind() == reflect.Interface { - if iw, ok := w.(InterfaceWalker); ok { - if err = iw.Interface(pointerV); err != nil { - return - } - } - - pointerV = pointerV.Elem() - } - - if pointerV.Kind() == reflect.Ptr { - if pw, ok := w.(PointerValueWalker); ok { - if err = pw.Pointer(pointerV); err != nil { - if err == SkipEntry { - // Skip the rest of this entry but clear the error - return nil - } - - return - } - } - - pointer = true - v = reflect.Indirect(pointerV) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return - } - - defer func(pointer bool) { - if err != nil { - return - } - - err = pw.PointerExit(pointer) - }(pointer) - } - - if pointer { - pointerV = v - } - pointer = false - - // If we still have a pointer or interface we have to indirect another level. - switch pointerV.Kind() { - case reflect.Ptr, reflect.Interface: - continue - } - break - } - - // We preserve the original value here because if it is an interface - // type, we want to pass that directly into the walkPrimitive, so that - // we can set it. 
- originalV := v - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - k := v.Kind() - if k >= reflect.Int && k <= reflect.Complex128 { - k = reflect.Int - } - - switch k { - // Primitives - case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: - err = walkPrimitive(originalV, w) - return - case reflect.Map: - err = walkMap(v, w) - return - case reflect.Slice: - err = walkSlice(v, w) - return - case reflect.Struct: - err = walkStruct(v, w) - return - case reflect.Array: - err = walkArray(v, w) - return - default: - panic("unsupported type: " + k.String()) - } -} - -func walkMap(v reflect.Value, w interface{}) error { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Map) - } - - if mw, ok := w.(MapWalker); ok { - if err := mw.Map(v); err != nil { - return err - } - } - - for _, k := range v.MapKeys() { - kv := v.MapIndex(k) - - if mw, ok := w.(MapWalker); ok { - if err := mw.MapElem(v, k, kv); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(MapKey) - } - - if err := walk(k, w); err != nil { - return err - } - - if ok { - ew.Exit(MapKey) - ew.Enter(MapValue) - } - - // get the map value again as it may have changed in the MapElem call - if err := walk(v.MapIndex(k), w); err != nil { - return err - } - - if ok { - ew.Exit(MapValue) - } - } - - if ewok { - ew.Exit(Map) - } - - return nil -} - -func walkPrimitive(v reflect.Value, w interface{}) error { - if pw, ok := w.(PrimitiveWalker); ok { - return pw.Primitive(v) - } - - return nil -} - -func walkSlice(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Slice) - } - - if sw, ok := w.(SliceWalker); ok { - if err := sw.Slice(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if sw, ok := w.(SliceWalker); ok { - if err := sw.SliceElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(SliceElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(SliceElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Slice) - } - - return nil -} - -func walkArray(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Array) - } - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.Array(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.ArrayElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(ArrayElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(ArrayElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Array) - } - - return nil -} - -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) - } - - skip := false - if sw, ok := w.(StructWalker); ok { - err = sw.Struct(v) - if err == SkipEntry { - skip = true - err = nil - } - if err != nil { - return - } - } - - if !skip { - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) - - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - - // SkipEntry just pretends this field doesn't even exist - if err == SkipEntry { - continue - } - - if err != nil { - return - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - - err = 
walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) - } - } - } - - if ewok { - ew.Exit(Struct) - } - - return nil -} diff --git a/vendor/github.com/olekukonko/tablewriter/.gitignore b/vendor/github.com/olekukonko/tablewriter/.gitignore deleted file mode 100644 index b66cec63..00000000 --- a/vendor/github.com/olekukonko/tablewriter/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Created by .ignore support plugin (hsz.mobi) -### Go template -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/olekukonko/tablewriter/.travis.yml b/vendor/github.com/olekukonko/tablewriter/.travis.yml deleted file mode 100644 index 366d48a3..00000000 --- a/vendor/github.com/olekukonko/tablewriter/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go -arch: - - ppc64le - - amd64 -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - "1.10" - - tip -jobs: - exclude : - - arch : ppc64le - go : - - 1.3 - - arch : ppc64le - go : - - 1.4 diff --git a/vendor/github.com/olekukonko/tablewriter/LICENSE.md b/vendor/github.com/olekukonko/tablewriter/LICENSE.md deleted file mode 100644 index a0769b5c..00000000 --- a/vendor/github.com/olekukonko/tablewriter/LICENSE.md +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 by Oleku Konko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md deleted file mode 100644 index f06530d7..00000000 --- a/vendor/github.com/olekukonko/tablewriter/README.md +++ /dev/null @@ -1,431 +0,0 @@ -ASCII Table Writer -========= - -[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) -[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) -[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter) - -Generate ASCII table on the fly ... 
Installation is simple as - - go get github.com/olekukonko/tablewriter - - -#### Features -- Automatic Padding -- Support Multiple Lines -- Supports Alignment -- Support Custom Separators -- Automatic Alignment of numbers & percentage -- Write directly to http , file etc via `io.Writer` -- Read directly from CSV file -- Optional row line via `SetRowLine` -- Normalise table header -- Make CSV Headers optional -- Enable or disable table border -- Set custom footer support -- Optional identical cells merging -- Set custom caption -- Optional reflowing of paragraphs in multi-line cells. - -#### Example 1 - Basic -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -##### Output 1 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -``` - -#### Example 2 - Without Border / Footer / Bulk Append -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 2 -``` - - DATE | DESCRIPTION | CV2 | AMOUNT ------------+--------------------------+-------+---------- - 1/1/2014 | Domain name | 2233 | $10.98 - 1/1/2014 | January Hosting | 2233 | $54.95 - 1/4/2014 | February Hosting | 2233 | $51.00 - 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 ------------+--------------------------+-------+---------- - TOTAL | $146 93 - --------+---------- - -``` - - -#### Example 3 - CSV -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true) -table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment -table.Render() -``` - -##### Output 3 -``` -+----------+--------------+------+-----+---------+----------------+ -| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA | -+----------+--------------+------+-----+---------+----------------+ -| user_id | smallint(5) | NO | PRI | NULL | auto_increment | -| username | varchar(10) | NO | | NULL | | -| password | varchar(100) | NO | | NULL | | -+----------+--------------+------+-----+---------+----------------+ -``` - -#### Example 4 - Custom Separator -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true) -table.SetRowLine(true) // Enable row line - -// Change table lines -table.SetCenterSeparator("*") -table.SetColumnSeparator("╪") -table.SetRowSeparator("-") - -table.SetAlignment(tablewriter.ALIGN_LEFT) -table.Render() -``` - -##### Output 4 -``` -*------------*-----------*---------* -╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪ -*------------*-----------*---------* -╪ John ╪ Barry ╪ 123456 ╪ -*------------*-----------*---------* -╪ Kathy ╪ Smith ╪ 687987 ╪ -*------------*-----------*---------* -╪ 
Bob ╪ McCornick ╪ 3979870 ╪ -*------------*-----------*---------* -``` - -#### Example 5 - Markdown Format -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) -table.SetCenterSeparator("|") -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 5 -``` -| DATE | DESCRIPTION | CV2 | AMOUNT | -|----------|--------------------------|------|--------| -| 1/1/2014 | Domain name | 2233 | $10.98 | -| 1/1/2014 | January Hosting | 2233 | $54.95 | -| 1/4/2014 | February Hosting | 2233 | $51.00 | -| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 | -``` - -#### Example 6 - Identical cells merging -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "1234", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2345", "$54.95"}, - []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) -table.SetAutoMergeCells(true) -table.SetRowLine(true) -table.AppendBulk(data) -table.Render() -``` - -##### Output 6 -``` -+----------+--------------------------+-------+---------+ -| DATE | DESCRIPTION | CV2 | AMOUNT | -+----------+--------------------------+-------+---------+ -| 1/1/2014 | Domain name | 1234 | $10.98 | -+ +--------------------------+-------+---------+ -| | January Hosting | 2345 | $54.95 | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Hosting | 3456 | $51.00 | -+ +--------------------------+-------+---------+ -| | February Extra Bandwidth | 4567 | $30.00 | -+----------+--------------------------+-------+---------+ -| TOTAL | $146 93 | -+----------+--------------------------+-------+---------+ -``` - -#### Example 7 - Identical cells merging (specify the column index to merge) -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "1234", "$10.98"}, - []string{"1/1/2014", "January Hosting", "1234", "$10.98"}, - []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) -table.SetAutoMergeCellsByColumnIndex([]int{2, 3}) -table.SetRowLine(true) -table.AppendBulk(data) -table.Render() -``` - -##### Output 7 -``` -+----------+--------------------------+-------+---------+ -| DATE | DESCRIPTION | CV2 | AMOUNT | -+----------+--------------------------+-------+---------+ -| 1/1/2014 | Domain name | 1234 | $10.98 | -+----------+--------------------------+ + + -| 1/1/2014 | January Hosting | | | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Hosting | 3456 | $51.00 | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Extra Bandwidth | 4567 | $30.00 | -+----------+--------------------------+-------+---------+ -| TOTAL | $146.93 | 
-+----------+--------------------------+-------+---------+ -``` - - -#### Table with color -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false - -table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, - tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, - tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, - tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) - -table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) - -table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, - tablewriter.Colors{tablewriter.Bold}, - tablewriter.Colors{tablewriter.FgHiRedColor}) - -table.AppendBulk(data) -table.Render() -``` - -#### Table with color Output -![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png) - -#### Example - 8 Table Cells with Color - -Individual Cell Colors from `func Rich` take precedence over Column Colors - -```go -data := [][]string{ - []string{"Test1Merge", "HelloCol2 - 1", "HelloCol3 - 1", "HelloCol4 - 1"}, - []string{"Test1Merge", "HelloCol2 - 2", "HelloCol3 - 2", "HelloCol4 - 2"}, - []string{"Test1Merge", "HelloCol2 - 3", "HelloCol3 - 3", "HelloCol4 - 3"}, - []string{"Test2Merge", "HelloCol2 - 4", "HelloCol3 - 4", "HelloCol4 - 4"}, - []string{"Test2Merge", "HelloCol2 - 5", "HelloCol3 - 5", "HelloCol4 - 5"}, - []string{"Test2Merge", "HelloCol2 - 6", "HelloCol3 - 6", "HelloCol4 - 6"}, - []string{"Test2Merge", "HelloCol2 - 7", "HelloCol3 - 7", "HelloCol4 - 7"}, - []string{"Test3Merge", "HelloCol2 - 8", "HelloCol3 - 8", "HelloCol4 - 8"}, - []string{"Test3Merge", "HelloCol2 - 9", "HelloCol3 - 9", "HelloCol4 - 9"}, - []string{"Test3Merge", "HelloCol2 - 10", "HelloCol3 -10", "HelloCol4 - 10"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Col1", "Col2", "Col3", "Col4"}) -table.SetFooter([]string{"", "", "Footer3", "Footer4"}) -table.SetBorder(false) - -table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, - tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, - tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, - tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) - -table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) - -table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, - tablewriter.Colors{tablewriter.Bold}, - tablewriter.Colors{tablewriter.FgHiRedColor}) - -colorData1 := []string{"TestCOLOR1Merge", "HelloCol2 - COLOR1", "HelloCol3 - COLOR1", "HelloCol4 
- COLOR1"} -colorData2 := []string{"TestCOLOR2Merge", "HelloCol2 - COLOR2", "HelloCol3 - COLOR2", "HelloCol4 - COLOR2"} - -for i, row := range data { - if i == 4 { - table.Rich(colorData1, []tablewriter.Colors{tablewriter.Colors{}, tablewriter.Colors{tablewriter.Normal, tablewriter.FgCyanColor}, tablewriter.Colors{tablewriter.Bold, tablewriter.FgWhiteColor}, tablewriter.Colors{}}) - table.Rich(colorData2, []tablewriter.Colors{tablewriter.Colors{tablewriter.Normal, tablewriter.FgMagentaColor}, tablewriter.Colors{}, tablewriter.Colors{tablewriter.Bold, tablewriter.BgRedColor}, tablewriter.Colors{tablewriter.FgHiGreenColor, tablewriter.Italic, tablewriter.BgHiCyanColor}}) - } - table.Append(row) -} - -table.SetAutoMergeCells(true) -table.Render() - -``` - -##### Table cells with color Output -![Table cells with Color](https://user-images.githubusercontent.com/9064687/63969376-bcd88d80-ca6f-11e9-9466-c3d954700b25.png) - -#### Example 9 - Set table caption -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) -table.SetCaption(true, "Movie ratings.") - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -Note: Caption text will wrap with total width of rendered table. - -##### Output 9 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -Movie ratings. -``` - -#### Example 10 - Set NoWhiteSpace and TablePadding option -```go -data := [][]string{ - {"node1.example.com", "Ready", "compute", "1.11"}, - {"node2.example.com", "Ready", "compute", "1.11"}, - {"node3.example.com", "Ready", "compute", "1.11"}, - {"node4.example.com", "NotReady", "compute", "1.11"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Status", "Role", "Version"}) -table.SetAutoWrapText(false) -table.SetAutoFormatHeaders(true) -table.SetHeaderAlignment(ALIGN_LEFT) -table.SetAlignment(ALIGN_LEFT) -table.SetCenterSeparator("") -table.SetColumnSeparator("") -table.SetRowSeparator("") -table.SetHeaderLine(false) -table.SetBorder(false) -table.SetTablePadding("\t") // pad with tabs -table.SetNoWhiteSpace(true) -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 10 -``` -NAME STATUS ROLE VERSION -node1.example.com Ready compute 1.11 -node2.example.com Ready compute 1.11 -node3.example.com Ready compute 1.11 -node4.example.com NotReady compute 1.11 -``` - -#### Render table into a string - -Instead of rendering the table to `io.Stdout` you can also render it into a string. Go 1.10 introduced the `strings.Builder` type which implements the `io.Writer` interface and can therefore be used for this task. 
Example: - -```go -package main - -import ( - "strings" - "fmt" - - "github.com/olekukonko/tablewriter" -) - -func main() { - tableString := &strings.Builder{} - table := tablewriter.NewWriter(tableString) - - /* - * Code to fill the table - */ - - table.Render() - - fmt.Println(tableString.String()) -} -``` - -#### TODO -- ~~Import Directly from CSV~~ - `done` -- ~~Support for `SetFooter`~~ - `done` -- ~~Support for `SetBorder`~~ - `done` -- ~~Support table with uneven rows~~ - `done` -- ~~Support custom alignment~~ -- General Improvement & Optimisation -- `NewHTML` Parse table from HTML diff --git a/vendor/github.com/olekukonko/tablewriter/csv.go b/vendor/github.com/olekukonko/tablewriter/csv.go deleted file mode 100644 index 98878303..00000000 --- a/vendor/github.com/olekukonko/tablewriter/csv.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "encoding/csv" - "io" - "os" -) - -// Start A new table by importing from a CSV file -// Takes io.Writer and csv File name -func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { - file, err := os.Open(fileName) - if err != nil { - return &Table{}, err - } - defer file.Close() - csvReader := csv.NewReader(file) - t, err := NewCSVReader(writer, csvReader, hasHeader) - return t, err -} - -// Start a New Table Writer with csv.Reader -// This enables customisation such as reader.Comma = ';' -// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 -func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { - t := NewWriter(writer) - if hasHeader { - // Read the first row - headers, err := csvReader.Read() - if err != nil { - return &Table{}, err - } - t.SetHeader(headers) - } - for { - record, err := csvReader.Read() - if err == io.EOF { - break - } else if err != nil { - return &Table{}, err - } - t.Append(record) - } - return t, nil -} diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go deleted file mode 100644 index f913149c..00000000 --- a/vendor/github.com/olekukonko/tablewriter/table.go +++ /dev/null @@ -1,967 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -// Create & Generate text based table -package tablewriter - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -const ( - MAX_ROW_WIDTH = 30 -) - -const ( - CENTER = "+" - ROW = "-" - COLUMN = "|" - SPACE = " " - NEWLINE = "\n" -) - -const ( - ALIGN_DEFAULT = iota - ALIGN_CENTER - ALIGN_RIGHT - ALIGN_LEFT -) - -var ( - decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`) - percent = regexp.MustCompile(`^-?\d+\.?\d*$%$`) -) - -type Border struct { - Left bool - Right bool - Top bool - Bottom bool -} - -type Table struct { - out io.Writer - rows [][]string - lines [][][]string - cs map[int]int - rs map[int]int - headers [][]string - footers [][]string - caption bool - captionText string - autoFmt bool - autoWrap bool - reflowText bool - mW int - pCenter string - pRow string - pColumn string - tColumn int - tRow int - hAlign int - fAlign int - align int - newLine string - rowLine bool - autoMergeCells bool - columnsToAutoMergeCells map[int]bool - noWhiteSpace bool - tablePadding string - hdrLine bool - borders Border - colSize int - headerParams []string - columnsParams []string - footerParams []string - columnsAlign []int -} - -// Start New Table -// Take io.Writer Directly -func NewWriter(writer io.Writer) *Table { - t := &Table{ - out: writer, - rows: [][]string{}, - lines: [][][]string{}, - cs: make(map[int]int), - rs: make(map[int]int), - headers: [][]string{}, - footers: [][]string{}, - caption: false, - captionText: "Table caption.", - autoFmt: true, - autoWrap: true, - reflowText: true, - mW: MAX_ROW_WIDTH, - pCenter: CENTER, - pRow: ROW, - pColumn: COLUMN, - tColumn: -1, - tRow: -1, - hAlign: ALIGN_DEFAULT, - fAlign: ALIGN_DEFAULT, - align: ALIGN_DEFAULT, - newLine: NEWLINE, - rowLine: false, - hdrLine: true, - borders: Border{Left: true, Right: true, Bottom: true, Top: true}, - colSize: -1, - headerParams: []string{}, - columnsParams: []string{}, - footerParams: []string{}, - columnsAlign: []int{}} - return t -} - -// Render table output -func (t *Table) Render() { - if t.borders.Top { - t.printLine(true) - } - t.printHeading() - if t.autoMergeCells { - t.printRowsMergeCells() - } else { - t.printRows() - } - if !t.rowLine && t.borders.Bottom { - t.printLine(true) - } - t.printFooter() - - if t.caption { - t.printCaption() - } -} - -const ( - headerRowIdx = -1 - footerRowIdx = -2 -) - -// Set table header -func (t *Table) SetHeader(keys []string) { - t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, headerRowIdx) - t.headers = append(t.headers, lines) - } -} - -// Set table Footer -func (t *Table) SetFooter(keys []string) { - //t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, footerRowIdx) - t.footers = append(t.footers, lines) - } -} - -// Set table Caption -func (t *Table) SetCaption(caption bool, captionText ...string) { - t.caption = caption - if len(captionText) == 1 { - t.captionText = captionText[0] - } -} - -// Turn header autoformatting on/off. Default is on (true). -func (t *Table) SetAutoFormatHeaders(auto bool) { - t.autoFmt = auto -} - -// Turn automatic multiline text adjustment on/off. Default is on (true). -func (t *Table) SetAutoWrapText(auto bool) { - t.autoWrap = auto -} - -// Turn automatic reflowing of multiline text when rewrapping. Default is on (true). 
-func (t *Table) SetReflowDuringAutoWrap(auto bool) { - t.reflowText = auto -} - -// Set the Default column width -func (t *Table) SetColWidth(width int) { - t.mW = width -} - -// Set the minimal width for a column -func (t *Table) SetColMinWidth(column int, width int) { - t.cs[column] = width -} - -// Set the Column Separator -func (t *Table) SetColumnSeparator(sep string) { - t.pColumn = sep -} - -// Set the Row Separator -func (t *Table) SetRowSeparator(sep string) { - t.pRow = sep -} - -// Set the center Separator -func (t *Table) SetCenterSeparator(sep string) { - t.pCenter = sep -} - -// Set Header Alignment -func (t *Table) SetHeaderAlignment(hAlign int) { - t.hAlign = hAlign -} - -// Set Footer Alignment -func (t *Table) SetFooterAlignment(fAlign int) { - t.fAlign = fAlign -} - -// Set Table Alignment -func (t *Table) SetAlignment(align int) { - t.align = align -} - -// Set No White Space -func (t *Table) SetNoWhiteSpace(allow bool) { - t.noWhiteSpace = allow -} - -// Set Table Padding -func (t *Table) SetTablePadding(padding string) { - t.tablePadding = padding -} - -func (t *Table) SetColumnAlignment(keys []int) { - for _, v := range keys { - switch v { - case ALIGN_CENTER: - break - case ALIGN_LEFT: - break - case ALIGN_RIGHT: - break - default: - v = ALIGN_DEFAULT - } - t.columnsAlign = append(t.columnsAlign, v) - } -} - -// Set New Line -func (t *Table) SetNewLine(nl string) { - t.newLine = nl -} - -// Set Header Line -// This would enable / disable a line after the header -func (t *Table) SetHeaderLine(line bool) { - t.hdrLine = line -} - -// Set Row Line -// This would enable / disable a line on each row of the table -func (t *Table) SetRowLine(line bool) { - t.rowLine = line -} - -// Set Auto Merge Cells -// This would enable / disable the merge of cells with identical values -func (t *Table) SetAutoMergeCells(auto bool) { - t.autoMergeCells = auto -} - -// Set Auto Merge Cells By Column Index -// This would enable / disable the merge of cells with identical values for specific columns -// If cols is empty, it is the same as `SetAutoMergeCells(true)`. 
-func (t *Table) SetAutoMergeCellsByColumnIndex(cols []int) { - t.autoMergeCells = true - - if len(cols) > 0 { - m := make(map[int]bool) - for _, col := range cols { - m[col] = true - } - t.columnsToAutoMergeCells = m - } -} - -// Set Table Border -// This would enable / disable line around the table -func (t *Table) SetBorder(border bool) { - t.SetBorders(Border{border, border, border, border}) -} - -func (t *Table) SetBorders(border Border) { - t.borders = border -} - -// Append row to table -func (t *Table) Append(row []string) { - rowSize := len(t.headers) - if rowSize > t.colSize { - t.colSize = rowSize - } - - n := len(t.lines) - line := [][]string{} - for i, v := range row { - - // Detect string width - // Detect String height - // Break strings into words - out := t.parseDimension(v, i, n) - - // Append broken words - line = append(line, out) - } - t.lines = append(t.lines, line) -} - -// Append row to table with color attributes -func (t *Table) Rich(row []string, colors []Colors) { - rowSize := len(t.headers) - if rowSize > t.colSize { - t.colSize = rowSize - } - - n := len(t.lines) - line := [][]string{} - for i, v := range row { - - // Detect string width - // Detect String height - // Break strings into words - out := t.parseDimension(v, i, n) - - if len(colors) > i { - color := colors[i] - out[0] = format(out[0], color) - } - - // Append broken words - line = append(line, out) - } - t.lines = append(t.lines, line) -} - -// Allow Support for Bulk Append -// Eliminates repeated for loops -func (t *Table) AppendBulk(rows [][]string) { - for _, row := range rows { - t.Append(row) - } -} - -// NumLines to get the number of lines -func (t *Table) NumLines() int { - return len(t.lines) -} - -// Clear rows -func (t *Table) ClearRows() { - t.lines = [][][]string{} -} - -// Clear footer -func (t *Table) ClearFooter() { - t.footers = [][]string{} -} - -// Center based on position and border. 
-func (t *Table) center(i int) string { - if i == -1 && !t.borders.Left { - return t.pRow - } - - if i == len(t.cs)-1 && !t.borders.Right { - return t.pRow - } - - return t.pCenter -} - -// Print line based on row width -func (t *Table) printLine(nl bool) { - fmt.Fprint(t.out, t.center(-1)) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.center(i)) - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Print line based on row width with our without cell separator -func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - if i > len(displayCellSeparator) || displayCellSeparator[i] { - // Display the cell separator - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } else { - // Don't display the cell separator for this cell - fmt.Fprintf(t.out, "%s%s", - strings.Repeat(" ", v+2), - t.pCenter) - } - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Return the PadRight function if align is left, PadLeft if align is right, -// and Pad by default -func pad(align int) func(string, string, int) string { - padFunc := Pad - switch align { - case ALIGN_LEFT: - padFunc = PadRight - case ALIGN_RIGHT: - padFunc = PadLeft - } - return padFunc -} - -// Print heading information -func (t *Table) printHeading() { - // Check if headers is available - if len(t.headers) < 1 { - return - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.hAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.headerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. - max := t.rs[headerRowIdx] - - // Print Heading - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - if !t.noWhiteSpace { - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - } - - for y := 0; y <= end; y++ { - v := t.cs[y] - h := "" - - if y < len(t.headers) && x < len(t.headers[y]) { - h = t.headers[y][x] - } - if t.autoFmt { - h = Title(h) - } - pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn) - if t.noWhiteSpace { - pad = ConditionString((y == end && !t.borders.Left), SPACE, t.tablePadding) - } - if is_esc_seq { - if !t.noWhiteSpace { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(h, SPACE, v), - t.headerParams[y]), pad) - } else { - fmt.Fprintf(t.out, "%s %s", - format(padFunc(h, SPACE, v), - t.headerParams[y]), pad) - } - } else { - if !t.noWhiteSpace { - fmt.Fprintf(t.out, " %s %s", - padFunc(h, SPACE, v), - pad) - } else { - // the spaces between breaks the kube formatting - fmt.Fprintf(t.out, "%s%s", - padFunc(h, SPACE, v), - pad) - } - } - } - // Next line - fmt.Fprint(t.out, t.newLine) - } - if t.hdrLine { - t.printLine(true) - } -} - -// Print heading information -func (t *Table) printFooter() { - // Check if headers is available - if len(t.footers) < 1 { - return - } - - // Only print line if border is not set - if !t.borders.Bottom { - t.printLine(true) - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.fAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.footerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. 
- max := t.rs[footerRowIdx] - - // Print Footer - erasePad := make([]bool, len(t.footers)) - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) - - for y := 0; y <= end; y++ { - v := t.cs[y] - f := "" - if y < len(t.footers) && x < len(t.footers[y]) { - f = t.footers[y][x] - } - if t.autoFmt { - f = Title(f) - } - pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn) - - if erasePad[y] || (x == 0 && len(f) == 0) { - pad = SPACE - erasePad[y] = true - } - - if is_esc_seq { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(f, SPACE, v), - t.footerParams[y]), pad) - } else { - fmt.Fprintf(t.out, " %s %s", - padFunc(f, SPACE, v), - pad) - } - - //fmt.Fprintf(t.out, " %s %s", - // padFunc(f, SPACE, v), - // pad) - } - // Next line - fmt.Fprint(t.out, t.newLine) - //t.printLine(true) - } - - hasPrinted := false - - for i := 0; i <= end; i++ { - v := t.cs[i] - pad := t.pRow - center := t.pCenter - length := len(t.footers[i][0]) - - if length > 0 { - hasPrinted = true - } - - // Set center to be space if length is 0 - if length == 0 && !t.borders.Right { - center = SPACE - } - - // Print first junction - if i == 0 { - if length > 0 && !t.borders.Left { - center = t.pRow - } - fmt.Fprint(t.out, center) - } - - // Pad With space of length is 0 - if length == 0 { - pad = SPACE - } - // Ignore left space as it has printed before - if hasPrinted || t.borders.Left { - pad = t.pRow - center = t.pCenter - } - - // Change Center end position - if center != SPACE { - if i == end && !t.borders.Right { - center = t.pRow - } - } - - // Change Center start position - if center == SPACE { - if i < end && len(t.footers[i+1][0]) != 0 { - if !t.borders.Left { - center = t.pRow - } else { - center = t.pCenter - } - } - } - - // Print the footer - fmt.Fprintf(t.out, "%s%s%s%s", - pad, - strings.Repeat(string(pad), v), - pad, - center) - - } - - fmt.Fprint(t.out, t.newLine) -} - -// Print caption text -func (t Table) printCaption() { - width := t.getTableWidth() - paragraph, _ := WrapString(t.captionText, width) - for linecount := 0; linecount < len(paragraph); linecount++ { - fmt.Fprintln(t.out, paragraph[linecount]) - } -} - -// Calculate the total number of characters in a row -func (t Table) getTableWidth() int { - var chars int - for _, v := range t.cs { - chars += v - } - - // Add chars, spaces, seperators to calculate the total width of the table. 
- // ncols := t.colSize - // spaces := ncols * 2 - // seps := ncols + 1 - - return (chars + (3 * t.colSize) + 2) -} - -func (t Table) printRows() { - for i, lines := range t.lines { - t.printRow(lines, i) - } -} - -func (t *Table) fillAlignment(num int) { - if len(t.columnsAlign) < num { - t.columnsAlign = make([]int, num) - for i := range t.columnsAlign { - t.columnsAlign[i] = t.align - } - } -} - -// Print Row Information -// Adjust column alignment based on type - -func (t *Table) printRow(columns [][]string, rowIdx int) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // TODO Fix uneven col size - // if total < t.colSize { - // for n := t.colSize - total; n < t.colSize ; n++ { - // columns = append(columns, []string{SPACE}) - // t.cs[n] = t.mW - // } - //} - - // Pad Each Height - pads := []int{} - - // Checking for ANSI escape sequences for columns - is_esc_seq := false - if len(t.columnsParams) > 0 { - is_esc_seq = true - } - t.fillAlignment(total) - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - //fmt.Println(max, "\n") - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - if !t.noWhiteSpace { - fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - fmt.Fprintf(t.out, SPACE) - } - - str := columns[y][x] - - // Embedding escape sequence with column value - if is_esc_seq { - str = format(str, t.columnsParams[y]) - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - - // TODO Custom alignment per column - //if max == 1 || pads[y] > 0 { - // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - //} else { - // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - //} - - } - } - if !t.noWhiteSpace { - fmt.Fprintf(t.out, SPACE) - } else { - fmt.Fprintf(t.out, t.tablePadding) - } - } - // Check if border is set - // Replace with space if not set - if !t.noWhiteSpace { - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - } - fmt.Fprint(t.out, t.newLine) - } - - if t.rowLine { - t.printLine(true) - } -} - -// Print the rows of the table and merge the cells that are identical -func (t *Table) printRowsMergeCells() { - var previousLine []string - var displayCellBorder []bool - var tmpWriter bytes.Buffer - for i, lines := range t.lines { - // We store the display of the current line in a tmp writer, as we need to know which border needs to be print above - previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine) - if i > 0 { //We don't need to print borders above first line - if t.rowLine { - t.printLineOptionalCellSeparators(true, displayCellBorder) - } - } - tmpWriter.WriteTo(t.out) - } - //Print the end of the table - if t.rowLine { - t.printLine(true) - } -} - -// Print Row Information to a writer and merge identical cells. 
-// Adjust column alignment based on type - -func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // Pad Each Height - pads := []int{} - - // Checking for ANSI escape sequences for columns - is_esc_seq := false - if len(t.columnsParams) > 0 { - is_esc_seq = true - } - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - - var displayCellBorder []bool - t.fillAlignment(total) - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(writer, SPACE) - - str := columns[y][x] - - // Embedding escape sequence with column value - if is_esc_seq { - str = format(str, t.columnsParams[y]) - } - - if t.autoMergeCells { - var mergeCell bool - if t.columnsToAutoMergeCells != nil { - // Check to see if the column index is in columnsToAutoMergeCells. - if t.columnsToAutoMergeCells[y] { - mergeCell = true - } - } else { - // columnsToAutoMergeCells was not set. - mergeCell = true - } - //Store the full line to merge mutli-lines cells - fullLine := strings.TrimRight(strings.Join(columns[y], " "), " ") - if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" && mergeCell { - // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty. - displayCellBorder = append(displayCellBorder, false) - str = "" - } else { - // First line or different content, keep the content and print the cell border - displayCellBorder = append(displayCellBorder, true) - } - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - } - } - fmt.Fprintf(writer, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(writer, t.newLine) - } - - //The new previous line is the current one - previousLine = make([]string, total) - for y := 0; y < total; y++ { - previousLine[y] = strings.TrimRight(strings.Join(columns[y], " "), " ") //Store the full line for multi-lines cells - } - //Returns the newly added line and wether or not a border should be displayed above. - return previousLine, displayCellBorder -} - -func (t *Table) parseDimension(str string, colKey, rowKey int) []string { - var ( - raw []string - maxWidth int - ) - - raw = getLines(str) - maxWidth = 0 - for _, line := range raw { - if w := DisplayWidth(line); w > maxWidth { - maxWidth = w - } - } - - // If wrapping, ensure that all paragraphs in the cell fit in the - // specified width. - if t.autoWrap { - // If there's a maximum allowed width for wrapping, use that. - if maxWidth > t.mW { - maxWidth = t.mW - } - - // In the process of doing so, we need to recompute maxWidth. 
This - // is because perhaps a word in the cell is longer than the - // allowed maximum width in t.mW. - newMaxWidth := maxWidth - newRaw := make([]string, 0, len(raw)) - - if t.reflowText { - // Make a single paragraph of everything. - raw = []string{strings.Join(raw, " ")} - } - for i, para := range raw { - paraLines, _ := WrapString(para, maxWidth) - for _, line := range paraLines { - if w := DisplayWidth(line); w > newMaxWidth { - newMaxWidth = w - } - } - if i > 0 { - newRaw = append(newRaw, " ") - } - newRaw = append(newRaw, paraLines...) - } - raw = newRaw - maxWidth = newMaxWidth - } - - // Store the new known maximum width. - v, ok := t.cs[colKey] - if !ok || v < maxWidth || v == 0 { - t.cs[colKey] = maxWidth - } - - // Remember the number of lines for the row printer. - h := len(raw) - v, ok = t.rs[rowKey] - - if !ok || v < h || v == 0 { - t.rs[rowKey] = h - } - //fmt.Printf("Raw %+v %d\n", raw, len(raw)) - return raw -} diff --git a/vendor/github.com/olekukonko/tablewriter/table_with_color.go b/vendor/github.com/olekukonko/tablewriter/table_with_color.go deleted file mode 100644 index ae7a364a..00000000 --- a/vendor/github.com/olekukonko/tablewriter/table_with_color.go +++ /dev/null @@ -1,136 +0,0 @@ -package tablewriter - -import ( - "fmt" - "strconv" - "strings" -) - -const ESC = "\033" -const SEP = ";" - -const ( - BgBlackColor int = iota + 40 - BgRedColor - BgGreenColor - BgYellowColor - BgBlueColor - BgMagentaColor - BgCyanColor - BgWhiteColor -) - -const ( - FgBlackColor int = iota + 30 - FgRedColor - FgGreenColor - FgYellowColor - FgBlueColor - FgMagentaColor - FgCyanColor - FgWhiteColor -) - -const ( - BgHiBlackColor int = iota + 100 - BgHiRedColor - BgHiGreenColor - BgHiYellowColor - BgHiBlueColor - BgHiMagentaColor - BgHiCyanColor - BgHiWhiteColor -) - -const ( - FgHiBlackColor int = iota + 90 - FgHiRedColor - FgHiGreenColor - FgHiYellowColor - FgHiBlueColor - FgHiMagentaColor - FgHiCyanColor - FgHiWhiteColor -) - -const ( - Normal = 0 - Bold = 1 - UnderlineSingle = 4 - Italic -) - -type Colors []int - -func startFormat(seq string) string { - return fmt.Sprintf("%s[%sm", ESC, seq) -} - -func stopFormat() string { - return fmt.Sprintf("%s[%dm", ESC, Normal) -} - -// Making the SGR (Select Graphic Rendition) sequence. 
-func makeSequence(codes []int) string { - codesInString := []string{} - for _, code := range codes { - codesInString = append(codesInString, strconv.Itoa(code)) - } - return strings.Join(codesInString, SEP) -} - -// Adding ANSI escape sequences before and after string -func format(s string, codes interface{}) string { - var seq string - - switch v := codes.(type) { - - case string: - seq = v - case []int: - seq = makeSequence(v) - case Colors: - seq = makeSequence(v) - default: - return s - } - - if len(seq) == 0 { - return s - } - return startFormat(seq) + s + stopFormat() -} - -// Adding header colors (ANSI codes) -func (t *Table) SetHeaderColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of header colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.headerParams = append(t.headerParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetColumnColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of column colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.columnsParams = append(t.columnsParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetFooterColor(colors ...Colors) { - if len(t.footers) != len(colors) { - panic("Number of footer colors must be equal to number of footer.") - } - for i := 0; i < len(colors); i++ { - t.footerParams = append(t.footerParams, makeSequence(colors[i])) - } -} - -func Color(colors ...int) []int { - return colors -} diff --git a/vendor/github.com/olekukonko/tablewriter/util.go b/vendor/github.com/olekukonko/tablewriter/util.go deleted file mode 100644 index 380e7ab3..00000000 --- a/vendor/github.com/olekukonko/tablewriter/util.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "regexp" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]") - -func DisplayWidth(str string) int { - return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, "")) -} - -// Simple Condition for string -// Returns value based on condition -func ConditionString(cond bool, valid, inValid string) string { - if cond { - return valid - } - return inValid -} - -func isNumOrSpace(r rune) bool { - return ('0' <= r && r <= '9') || r == ' ' -} - -// Format Table Header -// Replace _ , . and spaces -func Title(name string) string { - origLen := len(name) - rs := []rune(name) - for i, r := range rs { - switch r { - case '_': - rs[i] = ' ' - case '.': - // ignore floating number 0.0 - if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) { - rs[i] = ' ' - } - } - } - name = string(rs) - name = strings.TrimSpace(name) - if len(name) == 0 && origLen > 0 { - // Keep at least one character. This is important to preserve - // empty lines in multi-line headers/footers. 
- name = " " - } - return strings.ToUpper(name) -} - -// Pad String -// Attempts to place string in the center -func Pad(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - gapLeft := int(math.Ceil(float64(gap / 2))) - gapRight := gap - gapLeft - return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight) - } - return s -} - -// Pad String Right position -// This would place string at the left side of the screen -func PadRight(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return s + strings.Repeat(string(pad), gap) - } - return s -} - -// Pad String Left position -// This would place string at the right side of the screen -func PadLeft(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return strings.Repeat(string(pad), gap) + s - } - return s -} diff --git a/vendor/github.com/olekukonko/tablewriter/wrap.go b/vendor/github.com/olekukonko/tablewriter/wrap.go deleted file mode 100644 index a092ee1f..00000000 --- a/vendor/github.com/olekukonko/tablewriter/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ( - nl = "\n" - sp = " " -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. -func WrapString(s string, lim int) ([]string, int) { - words := strings.Split(strings.Replace(s, nl, sp, -1), sp) - var lines []string - max := 0 - for _, v := range words { - max = runewidth.StringWidth(v) - if max > lim { - lim = max - } - } - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, strings.Join(line, sp)) - } - return lines, lim -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, -// WrapString will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each rune as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. 
-func WrapWords(words []string, spc, lim, pen int) [][]string { - n := len(words) - - length := make([][]int, n) - for i := 0; i < n; i++ { - length[i] = make([]int, n) - length[i][i] = runewidth.StringWidth(words[i]) - for j := i + 1; j < n; j++ { - length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j]) - } - } - nbrk := make([]int, n) - cost := make([]int, n) - for i := range cost { - cost[i] = math.MaxInt32 - } - for i := n - 1; i >= 0; i-- { - if length[i][n-1] <= lim { - cost[i] = 0 - nbrk[i] = n - } else { - for j := i + 1; j < n; j++ { - d := lim - length[i][j-1] - c := d*d + cost[j] - if length[i][j-1] > lim { - c += pen // too-long lines get a worse penalty - } - if c < cost[i] { - cost[i] = c - nbrk[i] = j - } - } - } - } - var lines [][]string - i := 0 - for i < n { - lines = append(lines, words[i:nbrk[i]]) - i = nbrk[i] - } - return lines -} - -// getLines decomposes a multiline string into a slice of strings. -func getLines(s string) []string { - return strings.Split(s, nl) -} diff --git a/vendor/github.com/rivo/uniseg/LICENSE.txt b/vendor/github.com/rivo/uniseg/LICENSE.txt deleted file mode 100644 index 5040f1ef..00000000 --- a/vendor/github.com/rivo/uniseg/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Oliver Kuederle - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md deleted file mode 100644 index f8da293e..00000000 --- a/vendor/github.com/rivo/uniseg/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Unicode Text Segmentation for Go - -[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/rivo/uniseg) -[![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) - -This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0). - -At this point, only the determination of grapheme cluster boundaries is implemented. - -## Background - -In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". 
Here are some examples: - -|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters| -|-|-|-|-| -|Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`| -|🏳️‍🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`| -|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`| - -This package provides a tool to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit. - -## Installation - -```bash -go get github.com/rivo/uniseg -``` - -## Basic Example - -```go -package uniseg - -import ( - "fmt" - - "github.com/rivo/uniseg" -) - -func main() { - gr := uniseg.NewGraphemes("👍🏼!") - for gr.Next() { - fmt.Printf("%x ", gr.Runes()) - } - // Output: [1f44d 1f3fc] [21] -} -``` - -## Documentation - -Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation. - -## Dependencies - -This package does not depend on any packages outside the standard library. - -## Your Feedback - -Add your issue here on GitHub. Feel free to get in touch if you have any questions. - -## Version - -Version tags will be introduced once Golang modules are official. Consider this version 0.1. diff --git a/vendor/github.com/rivo/uniseg/doc.go b/vendor/github.com/rivo/uniseg/doc.go deleted file mode 100644 index 60c737d7..00000000 --- a/vendor/github.com/rivo/uniseg/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package uniseg implements Unicode Text Segmentation according to Unicode -Standard Annex #29 (http://unicode.org/reports/tr29/). - -At this point, only the determination of grapheme cluster boundaries is -implemented. -*/ -package uniseg diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go deleted file mode 100644 index 207157f5..00000000 --- a/vendor/github.com/rivo/uniseg/grapheme.go +++ /dev/null @@ -1,268 +0,0 @@ -package uniseg - -import "unicode/utf8" - -// The states of the grapheme cluster parser. -const ( - grAny = iota - grCR - grControlLF - grL - grLVV - grLVTT - grPrepend - grExtendedPictographic - grExtendedPictographicZWJ - grRIOdd - grRIEven -) - -// The grapheme cluster parser's breaking instructions. -const ( - grNoBoundary = iota - grBoundary -) - -// The grapheme cluster parser's state transitions. Maps (state, property) to -// (new state, breaking instruction, rule number). The breaking instruction -// always refers to the boundary between the last and next code point. -// -// This map is queried as follows: -// -// 1. Find specific state + specific property. Stop if found. -// 2. Find specific state + any property. -// 3. Find any state + specific property. -// 4. If only (2) or (3) (but not both) was found, stop. -// 5. If both (2) and (3) were found, use state and breaking instruction from -// the transition with the lower rule number, prefer (3) if rule numbers -// are equal. Stop. -// 6. Assume grAny and grBoundary. -var grTransitions = map[[2]int][3]int{ - // GB5 - {grAny, prCR}: {grCR, grBoundary, 50}, - {grAny, prLF}: {grControlLF, grBoundary, 50}, - {grAny, prControl}: {grControlLF, grBoundary, 50}, - - // GB4 - {grCR, prAny}: {grAny, grBoundary, 40}, - {grControlLF, prAny}: {grAny, grBoundary, 40}, - - // GB3. - {grCR, prLF}: {grAny, grNoBoundary, 30}, - - // GB6. 
- {grAny, prL}: {grL, grBoundary, 9990}, - {grL, prL}: {grL, grNoBoundary, 60}, - {grL, prV}: {grLVV, grNoBoundary, 60}, - {grL, prLV}: {grLVV, grNoBoundary, 60}, - {grL, prLVT}: {grLVTT, grNoBoundary, 60}, - - // GB7. - {grAny, prLV}: {grLVV, grBoundary, 9990}, - {grAny, prV}: {grLVV, grBoundary, 9990}, - {grLVV, prV}: {grLVV, grNoBoundary, 70}, - {grLVV, prT}: {grLVTT, grNoBoundary, 70}, - - // GB8. - {grAny, prLVT}: {grLVTT, grBoundary, 9990}, - {grAny, prT}: {grLVTT, grBoundary, 9990}, - {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, - - // GB9. - {grAny, prExtend}: {grAny, grNoBoundary, 90}, - {grAny, prZWJ}: {grAny, grNoBoundary, 90}, - - // GB9a. - {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, - - // GB9b. - {grAny, prPreprend}: {grPrepend, grBoundary, 9990}, - {grPrepend, prAny}: {grAny, grNoBoundary, 92}, - - // GB11. - {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, - {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, - {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, - {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, - - // GB12 / GB13. - {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, - {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, - {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, -} - -// Graphemes implements an iterator over Unicode extended grapheme clusters, -// specified in the Unicode Standard Annex #29. Grapheme clusters correspond to -// "user-perceived characters". These characters often consist of multiple -// code points (e.g. the "woman kissing woman" emoji consists of 8 code points: -// woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ + -// woman) and the rules described in Annex #29 must be applied to group those -// code points into clusters perceived by the user as one character. -type Graphemes struct { - // The code points over which this class iterates. - codePoints []rune - - // The (byte-based) indices of the code points into the original string plus - // len(original string). Thus, len(indices) = len(codePoints) + 1. - indices []int - - // The current grapheme cluster to be returned. These are indices into - // codePoints/indices. If start == end, we either haven't started iterating - // yet (0) or the iteration has already completed (1). - start, end int - - // The index of the next code point to be parsed. - pos int - - // The current state of the code point parser. - state int -} - -// NewGraphemes returns a new grapheme cluster iterator. -func NewGraphemes(s string) *Graphemes { - l := utf8.RuneCountInString(s) - codePoints := make([]rune, l) - indices := make([]int, l+1) - i := 0 - for pos, r := range s { - codePoints[i] = r - indices[i] = pos - i++ - } - indices[l] = len(s) - g := &Graphemes{ - codePoints: codePoints, - indices: indices, - } - g.Next() // Parse ahead. - return g -} - -// Next advances the iterator by one grapheme cluster and returns false if no -// clusters are left. This function must be called before the first cluster is -// accessed. -func (g *Graphemes) Next() bool { - g.start = g.end - - // The state transition gives us a boundary instruction BEFORE the next code - // point so we always need to stay ahead by one code point. - - // Parse the next code point. - for g.pos <= len(g.codePoints) { - // GB2. - if g.pos == len(g.codePoints) { - g.end = g.pos - g.pos++ - break - } - - // Determine the property of the next character. 
- nextProperty := property(g.codePoints[g.pos]) - g.pos++ - - // Find the applicable transition. - var boundary bool - transition, ok := grTransitions[[2]int{g.state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - g.state = transition[0] - boundary = transition[1] == grBoundary - } else { - // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}] - transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}] - if okAnyProp && okAnyState { - // Both apply. We'll use a mix (see comments for grTransitions). - g.state = transAnyState[0] - boundary = transAnyState[1] == grBoundary - if transAnyProp[2] < transAnyState[2] { - g.state = transAnyProp[0] - boundary = transAnyProp[1] == grBoundary - } - } else if okAnyProp { - // We only have a specific state. - g.state = transAnyProp[0] - boundary = transAnyProp[1] == grBoundary - // This branch will probably never be reached because okAnyState will - // always be true given the current transition map. But we keep it here - // for future modifications to the transition map where this may not be - // true anymore. - } else if okAnyState { - // We only have a specific property. - g.state = transAnyState[0] - boundary = transAnyState[1] == grBoundary - } else { - // No known transition. GB999: Any x Any. - g.state = grAny - boundary = true - } - } - - // If we found a cluster boundary, let's stop here. The current cluster will - // be the one that just ended. - if g.pos-1 == 0 /* GB1 */ || boundary { - g.end = g.pos - 1 - break - } - } - - return g.start != g.end -} - -// Runes returns a slice of runes (code points) which corresponds to the current -// grapheme cluster. If the iterator is already past the end or Next() has not -// yet been called, nil is returned. -func (g *Graphemes) Runes() []rune { - if g.start == g.end { - return nil - } - return g.codePoints[g.start:g.end] -} - -// Str returns a substring of the original string which corresponds to the -// current grapheme cluster. If the iterator is already past the end or Next() -// has not yet been called, an empty string is returned. -func (g *Graphemes) Str() string { - if g.start == g.end { - return "" - } - return string(g.codePoints[g.start:g.end]) -} - -// Bytes returns a byte slice which corresponds to the current grapheme cluster. -// If the iterator is already past the end or Next() has not yet been called, -// nil is returned. -func (g *Graphemes) Bytes() []byte { - if g.start == g.end { - return nil - } - return []byte(string(g.codePoints[g.start:g.end])) -} - -// Positions returns the interval of the current grapheme cluster as byte -// positions into the original string. The first returned value "from" indexes -// the first byte and the second returned value "to" indexes the first byte that -// is not included anymore, i.e. str[from:to] is the current grapheme cluster of -// the original string "str". If Next() has not yet been called, both values are -// 0. If the iterator is already past the end, both values are 1. -func (g *Graphemes) Positions() (int, int) { - return g.indices[g.start], g.indices[g.end] -} - -// Reset puts the iterator into its initial state such that the next call to -// Next() sets it to the first grapheme cluster again. -func (g *Graphemes) Reset() { - g.start, g.end, g.pos, g.state = 0, 0, 0, grAny - g.Next() // Parse ahead again. 
-} - -// GraphemeClusterCount returns the number of user-perceived characters -// (grapheme clusters) for the given string. To calculate this number, it -// iterates through the string using the Graphemes iterator. -func GraphemeClusterCount(s string) (n int) { - g := NewGraphemes(s) - for g.Next() { - n++ - } - return -} diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go deleted file mode 100644 index a75ab588..00000000 --- a/vendor/github.com/rivo/uniseg/properties.go +++ /dev/null @@ -1,1658 +0,0 @@ -package uniseg - -// The unicode properties. Only the ones needed in the context of this package -// are included. -const ( - prAny = iota - prPreprend - prCR - prLF - prControl - prExtend - prRegionalIndicator - prSpacingMark - prL - prV - prT - prLV - prLVT - prZWJ - prExtendedPictographic -) - -// Maps code point ranges to their properties. In the context of this package, -// any code point that is not contained may map to "prAny". The code point -// ranges in this slice are numerically sorted. -// -// These ranges were taken from -// http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt -// as well as -// https://unicode.org/Public/emoji/latest/emoji-data.txt -// ("Extended_Pictographic" only) on March 11, 2019. See -// https://www.unicode.org/license.html for the Unicode license agreement. -var codePoints = [][3]int{ - {0x0000, 0x0009, prControl}, // Cc [10] .. - {0x000A, 0x000A, prLF}, // Cc - {0x000B, 0x000C, prControl}, // Cc [2] .. - {0x000D, 0x000D, prCR}, // Cc - {0x000E, 0x001F, prControl}, // Cc [18] .. - {0x007F, 0x009F, prControl}, // Cc [33] .. - {0x00A9, 0x00A9, prExtendedPictographic}, // 1.1 [1] (©️) copyright - {0x00AD, 0x00AD, prControl}, // Cf SOFT HYPHEN - {0x00AE, 0x00AE, prExtendedPictographic}, // 1.1 [1] (®️) registered - {0x0300, 0x036F, prExtend}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X - {0x0483, 0x0487, prExtend}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE - {0x0488, 0x0489, prExtend}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN - {0x0591, 0x05BD, prExtend}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG - {0x05BF, 0x05BF, prExtend}, // Mn HEBREW POINT RAFE - {0x05C1, 0x05C2, prExtend}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT - {0x05C4, 0x05C5, prExtend}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT - {0x05C7, 0x05C7, prExtend}, // Mn HEBREW POINT QAMATS QATAN - {0x0600, 0x0605, prPreprend}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE - {0x0610, 0x061A, prExtend}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA - {0x061C, 0x061C, prControl}, // Cf ARABIC LETTER MARK - {0x064B, 0x065F, prExtend}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW - {0x0670, 0x0670, prExtend}, // Mn ARABIC LETTER SUPERSCRIPT ALEF - {0x06D6, 0x06DC, prExtend}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN - {0x06DD, 0x06DD, prPreprend}, // Cf ARABIC END OF AYAH - {0x06DF, 0x06E4, prExtend}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA - {0x06E7, 0x06E8, prExtend}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON - {0x06EA, 0x06ED, prExtend}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM - {0x070F, 0x070F, prPreprend}, // Cf SYRIAC ABBREVIATION MARK - {0x0711, 0x0711, prExtend}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH - {0x0730, 0x074A, prExtend}, // Mn [27] 
SYRIAC PTHAHA ABOVE..SYRIAC BARREKH - {0x07A6, 0x07B0, prExtend}, // Mn [11] THAANA ABAFILI..THAANA SUKUN - {0x07EB, 0x07F3, prExtend}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE - {0x07FD, 0x07FD, prExtend}, // Mn NKO DANTAYALAN - {0x0816, 0x0819, prExtend}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH - {0x081B, 0x0823, prExtend}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A - {0x0825, 0x0827, prExtend}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U - {0x0829, 0x082D, prExtend}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA - {0x0859, 0x085B, prExtend}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK - {0x08D3, 0x08E1, prExtend}, // Mn [15] ARABIC SMALL LOW WAW..ARABIC SMALL HIGH SIGN SAFHA - {0x08E2, 0x08E2, prPreprend}, // Cf ARABIC DISPUTED END OF AYAH - {0x08E3, 0x0902, prExtend}, // Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA - {0x0903, 0x0903, prSpacingMark}, // Mc DEVANAGARI SIGN VISARGA - {0x093A, 0x093A, prExtend}, // Mn DEVANAGARI VOWEL SIGN OE - {0x093B, 0x093B, prSpacingMark}, // Mc DEVANAGARI VOWEL SIGN OOE - {0x093C, 0x093C, prExtend}, // Mn DEVANAGARI SIGN NUKTA - {0x093E, 0x0940, prSpacingMark}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II - {0x0941, 0x0948, prExtend}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI - {0x0949, 0x094C, prSpacingMark}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU - {0x094D, 0x094D, prExtend}, // Mn DEVANAGARI SIGN VIRAMA - {0x094E, 0x094F, prSpacingMark}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW - {0x0951, 0x0957, prExtend}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE - {0x0962, 0x0963, prExtend}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL - {0x0981, 0x0981, prExtend}, // Mn BENGALI SIGN CANDRABINDU - {0x0982, 0x0983, prSpacingMark}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA - {0x09BC, 0x09BC, prExtend}, // Mn BENGALI SIGN NUKTA - {0x09BE, 0x09BE, prExtend}, // Mc BENGALI VOWEL SIGN AA - {0x09BF, 0x09C0, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II - {0x09C1, 0x09C4, prExtend}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR - {0x09C7, 0x09C8, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI - {0x09CB, 0x09CC, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU - {0x09CD, 0x09CD, prExtend}, // Mn BENGALI SIGN VIRAMA - {0x09D7, 0x09D7, prExtend}, // Mc BENGALI AU LENGTH MARK - {0x09E2, 0x09E3, prExtend}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL - {0x09FE, 0x09FE, prExtend}, // Mn BENGALI SANDHI MARK - {0x0A01, 0x0A02, prExtend}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI - {0x0A03, 0x0A03, prSpacingMark}, // Mc GURMUKHI SIGN VISARGA - {0x0A3C, 0x0A3C, prExtend}, // Mn GURMUKHI SIGN NUKTA - {0x0A3E, 0x0A40, prSpacingMark}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II - {0x0A41, 0x0A42, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU - {0x0A47, 0x0A48, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI - {0x0A4B, 0x0A4D, prExtend}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA - {0x0A51, 0x0A51, prExtend}, // Mn GURMUKHI SIGN UDAAT - {0x0A70, 0x0A71, prExtend}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK - {0x0A75, 0x0A75, prExtend}, // Mn GURMUKHI SIGN YAKASH - {0x0A81, 0x0A82, prExtend}, // Mn [2] GUJARATI 
SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA - {0x0A83, 0x0A83, prSpacingMark}, // Mc GUJARATI SIGN VISARGA - {0x0ABC, 0x0ABC, prExtend}, // Mn GUJARATI SIGN NUKTA - {0x0ABE, 0x0AC0, prSpacingMark}, // Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II - {0x0AC1, 0x0AC5, prExtend}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E - {0x0AC7, 0x0AC8, prExtend}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI - {0x0AC9, 0x0AC9, prSpacingMark}, // Mc GUJARATI VOWEL SIGN CANDRA O - {0x0ACB, 0x0ACC, prSpacingMark}, // Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU - {0x0ACD, 0x0ACD, prExtend}, // Mn GUJARATI SIGN VIRAMA - {0x0AE2, 0x0AE3, prExtend}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL - {0x0AFA, 0x0AFF, prExtend}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE - {0x0B01, 0x0B01, prExtend}, // Mn ORIYA SIGN CANDRABINDU - {0x0B02, 0x0B03, prSpacingMark}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA - {0x0B3C, 0x0B3C, prExtend}, // Mn ORIYA SIGN NUKTA - {0x0B3E, 0x0B3E, prExtend}, // Mc ORIYA VOWEL SIGN AA - {0x0B3F, 0x0B3F, prExtend}, // Mn ORIYA VOWEL SIGN I - {0x0B40, 0x0B40, prSpacingMark}, // Mc ORIYA VOWEL SIGN II - {0x0B41, 0x0B44, prExtend}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR - {0x0B47, 0x0B48, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI - {0x0B4B, 0x0B4C, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU - {0x0B4D, 0x0B4D, prExtend}, // Mn ORIYA SIGN VIRAMA - {0x0B56, 0x0B56, prExtend}, // Mn ORIYA AI LENGTH MARK - {0x0B57, 0x0B57, prExtend}, // Mc ORIYA AU LENGTH MARK - {0x0B62, 0x0B63, prExtend}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL - {0x0B82, 0x0B82, prExtend}, // Mn TAMIL SIGN ANUSVARA - {0x0BBE, 0x0BBE, prExtend}, // Mc TAMIL VOWEL SIGN AA - {0x0BBF, 0x0BBF, prSpacingMark}, // Mc TAMIL VOWEL SIGN I - {0x0BC0, 0x0BC0, prExtend}, // Mn TAMIL VOWEL SIGN II - {0x0BC1, 0x0BC2, prSpacingMark}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU - {0x0BC6, 0x0BC8, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI - {0x0BCA, 0x0BCC, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU - {0x0BCD, 0x0BCD, prExtend}, // Mn TAMIL SIGN VIRAMA - {0x0BD7, 0x0BD7, prExtend}, // Mc TAMIL AU LENGTH MARK - {0x0C00, 0x0C00, prExtend}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE - {0x0C01, 0x0C03, prSpacingMark}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA - {0x0C04, 0x0C04, prExtend}, // Mn TELUGU SIGN COMBINING ANUSVARA ABOVE - {0x0C3E, 0x0C40, prExtend}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II - {0x0C41, 0x0C44, prSpacingMark}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR - {0x0C46, 0x0C48, prExtend}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI - {0x0C4A, 0x0C4D, prExtend}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA - {0x0C55, 0x0C56, prExtend}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK - {0x0C62, 0x0C63, prExtend}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL - {0x0C81, 0x0C81, prExtend}, // Mn KANNADA SIGN CANDRABINDU - {0x0C82, 0x0C83, prSpacingMark}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA - {0x0CBC, 0x0CBC, prExtend}, // Mn KANNADA SIGN NUKTA - {0x0CBE, 0x0CBE, prSpacingMark}, // Mc KANNADA VOWEL SIGN AA - {0x0CBF, 0x0CBF, prExtend}, // Mn KANNADA VOWEL SIGN I - {0x0CC0, 0x0CC1, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U - {0x0CC2, 0x0CC2, prExtend}, // Mc KANNADA 
VOWEL SIGN UU - {0x0CC3, 0x0CC4, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR - {0x0CC6, 0x0CC6, prExtend}, // Mn KANNADA VOWEL SIGN E - {0x0CC7, 0x0CC8, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI - {0x0CCA, 0x0CCB, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO - {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA - {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK - {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL - {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU - {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA - {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA - {0x0D3E, 0x0D3E, prExtend}, // Mc MALAYALAM VOWEL SIGN AA - {0x0D3F, 0x0D40, prSpacingMark}, // Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II - {0x0D41, 0x0D44, prExtend}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR - {0x0D46, 0x0D48, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI - {0x0D4A, 0x0D4C, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU - {0x0D4D, 0x0D4D, prExtend}, // Mn MALAYALAM SIGN VIRAMA - {0x0D4E, 0x0D4E, prPreprend}, // Lo MALAYALAM LETTER DOT REPH - {0x0D57, 0x0D57, prExtend}, // Mc MALAYALAM AU LENGTH MARK - {0x0D62, 0x0D63, prExtend}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL - {0x0D82, 0x0D83, prSpacingMark}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA - {0x0DCA, 0x0DCA, prExtend}, // Mn SINHALA SIGN AL-LAKUNA - {0x0DCF, 0x0DCF, prExtend}, // Mc SINHALA VOWEL SIGN AELA-PILLA - {0x0DD0, 0x0DD1, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN KETTI AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA - {0x0DD2, 0x0DD4, prExtend}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA - {0x0DD6, 0x0DD6, prExtend}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA - {0x0DD8, 0x0DDE, prSpacingMark}, // Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA - {0x0DDF, 0x0DDF, prExtend}, // Mc SINHALA VOWEL SIGN GAYANUKITTA - {0x0DF2, 0x0DF3, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA - {0x0E31, 0x0E31, prExtend}, // Mn THAI CHARACTER MAI HAN-AKAT - {0x0E33, 0x0E33, prSpacingMark}, // Lo THAI CHARACTER SARA AM - {0x0E34, 0x0E3A, prExtend}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU - {0x0E47, 0x0E4E, prExtend}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN - {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN - {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM - {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA - {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS - {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA - {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS - {0x0F39, 0x0F39, prExtend}, // Mn TIBETAN MARK TSA -PHRU - {0x0F3E, 0x0F3F, prSpacingMark}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES - {0x0F71, 0x0F7E, prExtend}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO - 
{0x0F7F, 0x0F7F, prSpacingMark}, // Mc TIBETAN SIGN RNAM BCAD - {0x0F80, 0x0F84, prExtend}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA - {0x0F86, 0x0F87, prExtend}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS - {0x0F8D, 0x0F97, prExtend}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA - {0x0F99, 0x0FBC, prExtend}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA - {0x0FC6, 0x0FC6, prExtend}, // Mn TIBETAN SYMBOL PADMA GDAN - {0x102D, 0x1030, prExtend}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU - {0x1031, 0x1031, prSpacingMark}, // Mc MYANMAR VOWEL SIGN E - {0x1032, 0x1037, prExtend}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW - {0x1039, 0x103A, prExtend}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT - {0x103B, 0x103C, prSpacingMark}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA - {0x103D, 0x103E, prExtend}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA - {0x1056, 0x1057, prSpacingMark}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR - {0x1058, 0x1059, prExtend}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL - {0x105E, 0x1060, prExtend}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA - {0x1071, 0x1074, prExtend}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE - {0x1082, 0x1082, prExtend}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA - {0x1084, 0x1084, prSpacingMark}, // Mc MYANMAR VOWEL SIGN SHAN E - {0x1085, 0x1086, prExtend}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y - {0x108D, 0x108D, prExtend}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE - {0x109D, 0x109D, prExtend}, // Mn MYANMAR VOWEL SIGN AITON AI - {0x1100, 0x115F, prL}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER - {0x1160, 0x11A7, prV}, // Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE - {0x11A8, 0x11FF, prT}, // Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN - {0x135D, 0x135F, prExtend}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK - {0x1712, 0x1714, prExtend}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA - {0x1732, 0x1734, prExtend}, // Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN PAMUDPOD - {0x1752, 0x1753, prExtend}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U - {0x1772, 0x1773, prExtend}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U - {0x17B4, 0x17B5, prExtend}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA - {0x17B6, 0x17B6, prSpacingMark}, // Mc KHMER VOWEL SIGN AA - {0x17B7, 0x17BD, prExtend}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA - {0x17BE, 0x17C5, prSpacingMark}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU - {0x17C6, 0x17C6, prExtend}, // Mn KHMER SIGN NIKAHIT - {0x17C7, 0x17C8, prSpacingMark}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU - {0x17C9, 0x17D3, prExtend}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT - {0x17DD, 0x17DD, prExtend}, // Mn KHMER SIGN ATTHACAN - {0x180B, 0x180D, prExtend}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE - {0x180E, 0x180E, prControl}, // Cf MONGOLIAN VOWEL SEPARATOR - {0x1885, 0x1886, prExtend}, // Mn [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA - {0x18A9, 0x18A9, prExtend}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA - {0x1920, 
0x1922, prExtend}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U - {0x1923, 0x1926, prSpacingMark}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU - {0x1927, 0x1928, prExtend}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O - {0x1929, 0x192B, prSpacingMark}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA - {0x1930, 0x1931, prSpacingMark}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA - {0x1932, 0x1932, prExtend}, // Mn LIMBU SMALL LETTER ANUSVARA - {0x1933, 0x1938, prSpacingMark}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA - {0x1939, 0x193B, prExtend}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I - {0x1A17, 0x1A18, prExtend}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U - {0x1A19, 0x1A1A, prSpacingMark}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O - {0x1A1B, 0x1A1B, prExtend}, // Mn BUGINESE VOWEL SIGN AE - {0x1A55, 0x1A55, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA - {0x1A56, 0x1A56, prExtend}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA - {0x1A57, 0x1A57, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI - {0x1A58, 0x1A5E, prExtend}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA - {0x1A60, 0x1A60, prExtend}, // Mn TAI THAM SIGN SAKOT - {0x1A62, 0x1A62, prExtend}, // Mn TAI THAM VOWEL SIGN MAI SAT - {0x1A65, 0x1A6C, prExtend}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW - {0x1A6D, 0x1A72, prSpacingMark}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI - {0x1A73, 0x1A7C, prExtend}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN - {0x1A7F, 0x1A7F, prExtend}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT - {0x1AB0, 0x1ABD, prExtend}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW - {0x1ABE, 0x1ABE, prExtend}, // Me COMBINING PARENTHESES OVERLAY - {0x1B00, 0x1B03, prExtend}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG - {0x1B04, 0x1B04, prSpacingMark}, // Mc BALINESE SIGN BISAH - {0x1B34, 0x1B34, prExtend}, // Mn BALINESE SIGN REREKAN - {0x1B35, 0x1B35, prExtend}, // Mc BALINESE VOWEL SIGN TEDUNG - {0x1B36, 0x1B3A, prExtend}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA - {0x1B3B, 0x1B3B, prSpacingMark}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG - {0x1B3C, 0x1B3C, prExtend}, // Mn BALINESE VOWEL SIGN LA LENGA - {0x1B3D, 0x1B41, prSpacingMark}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG - {0x1B42, 0x1B42, prExtend}, // Mn BALINESE VOWEL SIGN PEPET - {0x1B43, 0x1B44, prSpacingMark}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG - {0x1B6B, 0x1B73, prExtend}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG - {0x1B80, 0x1B81, prExtend}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR - {0x1B82, 0x1B82, prSpacingMark}, // Mc SUNDANESE SIGN PANGWISAD - {0x1BA1, 0x1BA1, prSpacingMark}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL - {0x1BA2, 0x1BA5, prExtend}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU - {0x1BA6, 0x1BA7, prSpacingMark}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG - {0x1BA8, 0x1BA9, prExtend}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG - {0x1BAA, 0x1BAA, prSpacingMark}, // Mc SUNDANESE SIGN PAMAAEH - {0x1BAB, 0x1BAD, prExtend}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA - {0x1BE6, 0x1BE6, prExtend}, // Mn BATAK SIGN TOMPI - {0x1BE7, 0x1BE7, 
prSpacingMark}, // Mc BATAK VOWEL SIGN E - {0x1BE8, 0x1BE9, prExtend}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE - {0x1BEA, 0x1BEC, prSpacingMark}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O - {0x1BED, 0x1BED, prExtend}, // Mn BATAK VOWEL SIGN KARO O - {0x1BEE, 0x1BEE, prSpacingMark}, // Mc BATAK VOWEL SIGN U - {0x1BEF, 0x1BF1, prExtend}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H - {0x1BF2, 0x1BF3, prSpacingMark}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN - {0x1C24, 0x1C2B, prSpacingMark}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU - {0x1C2C, 0x1C33, prExtend}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T - {0x1C34, 0x1C35, prSpacingMark}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG - {0x1C36, 0x1C37, prExtend}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA - {0x1CD0, 0x1CD2, prExtend}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA - {0x1CD4, 0x1CE0, prExtend}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA - {0x1CE1, 0x1CE1, prSpacingMark}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA - {0x1CE2, 0x1CE8, prExtend}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL - {0x1CED, 0x1CED, prExtend}, // Mn VEDIC SIGN TIRYAK - {0x1CF4, 0x1CF4, prExtend}, // Mn VEDIC TONE CANDRA ABOVE - {0x1CF7, 0x1CF7, prSpacingMark}, // Mc VEDIC SIGN ATIKRAMA - {0x1CF8, 0x1CF9, prExtend}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE - {0x1DC0, 0x1DF9, prExtend}, // Mn [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW - {0x1DFB, 0x1DFF, prExtend}, // Mn [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW - {0x200B, 0x200B, prControl}, // Cf ZERO WIDTH SPACE - {0x200C, 0x200C, prExtend}, // Cf ZERO WIDTH NON-JOINER - {0x200D, 0x200D, prZWJ}, // Cf ZERO WIDTH JOINER - {0x200E, 0x200F, prControl}, // Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK - {0x2028, 0x2028, prControl}, // Zl LINE SEPARATOR - {0x2029, 0x2029, prControl}, // Zp PARAGRAPH SEPARATOR - {0x202A, 0x202E, prControl}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE - {0x203C, 0x203C, prExtendedPictographic}, // 1.1 [1] (‼️) double exclamation mark - {0x2049, 0x2049, prExtendedPictographic}, // 3.0 [1] (⁉️) exclamation question mark - {0x2060, 0x2064, prControl}, // Cf [5] WORD JOINER..INVISIBLE PLUS - {0x2065, 0x2065, prControl}, // Cn - {0x2066, 0x206F, prControl}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES - {0x20D0, 0x20DC, prExtend}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE - {0x20DD, 0x20E0, prExtend}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH - {0x20E1, 0x20E1, prExtend}, // Mn COMBINING LEFT RIGHT ARROW ABOVE - {0x20E2, 0x20E4, prExtend}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE - {0x20E5, 0x20F0, prExtend}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE - {0x2122, 0x2122, prExtendedPictographic}, // 1.1 [1] (™️) trade mark - {0x2139, 0x2139, prExtendedPictographic}, // 3.0 [1] (ℹ️) information - {0x2194, 0x2199, prExtendedPictographic}, // 1.1 [6] (↔️..↙️) left-right arrow..down-left arrow - {0x21A9, 0x21AA, prExtendedPictographic}, // 1.1 [2] (↩️..↪️) right arrow curving left..left arrow curving right - {0x231A, 0x231B, prExtendedPictographic}, // 1.1 [2] (⌚..⌛) watch..hourglass done - {0x2328, 0x2328, prExtendedPictographic}, // 1.1 [1] (⌨️) 
keyboard - {0x2388, 0x2388, prExtendedPictographic}, // 3.0 [1] (⎈) HELM SYMBOL - {0x23CF, 0x23CF, prExtendedPictographic}, // 4.0 [1] (⏏️) eject button - {0x23E9, 0x23F3, prExtendedPictographic}, // 6.0 [11] (⏩..⏳) fast-forward button..hourglass not done - {0x23F8, 0x23FA, prExtendedPictographic}, // 7.0 [3] (⏸️..⏺️) pause button..record button - {0x24C2, 0x24C2, prExtendedPictographic}, // 1.1 [1] (Ⓜ️) circled M - {0x25AA, 0x25AB, prExtendedPictographic}, // 1.1 [2] (▪️..▫️) black small square..white small square - {0x25B6, 0x25B6, prExtendedPictographic}, // 1.1 [1] (▶️) play button - {0x25C0, 0x25C0, prExtendedPictographic}, // 1.1 [1] (◀️) reverse button - {0x25FB, 0x25FE, prExtendedPictographic}, // 3.2 [4] (◻️..◾) white medium square..black medium-small square - {0x2600, 0x2605, prExtendedPictographic}, // 1.1 [6] (☀️..★) sun..BLACK STAR - {0x2607, 0x2612, prExtendedPictographic}, // 1.1 [12] (☇..☒) LIGHTNING..BALLOT BOX WITH X - {0x2614, 0x2615, prExtendedPictographic}, // 4.0 [2] (☔..☕) umbrella with rain drops..hot beverage - {0x2616, 0x2617, prExtendedPictographic}, // 3.2 [2] (☖..☗) WHITE SHOGI PIECE..BLACK SHOGI PIECE - {0x2618, 0x2618, prExtendedPictographic}, // 4.1 [1] (☘️) shamrock - {0x2619, 0x2619, prExtendedPictographic}, // 3.0 [1] (☙) REVERSED ROTATED FLORAL HEART BULLET - {0x261A, 0x266F, prExtendedPictographic}, // 1.1 [86] (☚..♯) BLACK LEFT POINTING INDEX..MUSIC SHARP SIGN - {0x2670, 0x2671, prExtendedPictographic}, // 3.0 [2] (♰..♱) WEST SYRIAC CROSS..EAST SYRIAC CROSS - {0x2672, 0x267D, prExtendedPictographic}, // 3.2 [12] (♲..♽) UNIVERSAL RECYCLING SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL - {0x267E, 0x267F, prExtendedPictographic}, // 4.1 [2] (♾️..♿) infinity..wheelchair symbol - {0x2680, 0x2685, prExtendedPictographic}, // 3.2 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 - {0x2690, 0x2691, prExtendedPictographic}, // 4.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG - {0x2692, 0x269C, prExtendedPictographic}, // 4.1 [11] (⚒️..⚜️) hammer and pick..fleur-de-lis - {0x269D, 0x269D, prExtendedPictographic}, // 5.1 [1] (⚝) OUTLINED WHITE STAR - {0x269E, 0x269F, prExtendedPictographic}, // 5.2 [2] (⚞..⚟) THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT - {0x26A0, 0x26A1, prExtendedPictographic}, // 4.0 [2] (⚠️..⚡) warning..high voltage - {0x26A2, 0x26B1, prExtendedPictographic}, // 4.1 [16] (⚢..⚱️) DOUBLED FEMALE SIGN..funeral urn - {0x26B2, 0x26B2, prExtendedPictographic}, // 5.0 [1] (⚲) NEUTER - {0x26B3, 0x26BC, prExtendedPictographic}, // 5.1 [10] (⚳..⚼) CERES..SESQUIQUADRATE - {0x26BD, 0x26BF, prExtendedPictographic}, // 5.2 [3] (⚽..⚿) soccer ball..SQUARED KEY - {0x26C0, 0x26C3, prExtendedPictographic}, // 5.1 [4] (⛀..⛃) WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING - {0x26C4, 0x26CD, prExtendedPictographic}, // 5.2 [10] (⛄..⛍) snowman without snow..DISABLED CAR - {0x26CE, 0x26CE, prExtendedPictographic}, // 6.0 [1] (⛎) Ophiuchus - {0x26CF, 0x26E1, prExtendedPictographic}, // 5.2 [19] (⛏️..⛡) pick..RESTRICTED LEFT ENTRY-2 - {0x26E2, 0x26E2, prExtendedPictographic}, // 6.0 [1] (⛢) ASTRONOMICAL SYMBOL FOR URANUS - {0x26E3, 0x26E3, prExtendedPictographic}, // 5.2 [1] (⛣) HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE - {0x26E4, 0x26E7, prExtendedPictographic}, // 6.0 [4] (⛤..⛧) PENTAGRAM..INVERTED PENTAGRAM - {0x26E8, 0x26FF, prExtendedPictographic}, // 5.2 [24] (⛨..⛿) BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE - {0x2700, 0x2700, prExtendedPictographic}, // 7.0 [1] (✀) BLACK SAFETY SCISSORS - {0x2701, 0x2704, prExtendedPictographic}, // 1.1 [4] (✁..✄) UPPER BLADE 
SCISSORS..WHITE SCISSORS - {0x2705, 0x2705, prExtendedPictographic}, // 6.0 [1] (✅) check mark button - {0x2708, 0x2709, prExtendedPictographic}, // 1.1 [2] (✈️..✉️) airplane..envelope - {0x270A, 0x270B, prExtendedPictographic}, // 6.0 [2] (✊..✋) raised fist..raised hand - {0x270C, 0x2712, prExtendedPictographic}, // 1.1 [7] (✌️..✒️) victory hand..black nib - {0x2714, 0x2714, prExtendedPictographic}, // 1.1 [1] (✔️) check mark - {0x2716, 0x2716, prExtendedPictographic}, // 1.1 [1] (✖️) multiplication sign - {0x271D, 0x271D, prExtendedPictographic}, // 1.1 [1] (✝️) latin cross - {0x2721, 0x2721, prExtendedPictographic}, // 1.1 [1] (✡️) star of David - {0x2728, 0x2728, prExtendedPictographic}, // 6.0 [1] (✨) sparkles - {0x2733, 0x2734, prExtendedPictographic}, // 1.1 [2] (✳️..✴️) eight-spoked asterisk..eight-pointed star - {0x2744, 0x2744, prExtendedPictographic}, // 1.1 [1] (❄️) snowflake - {0x2747, 0x2747, prExtendedPictographic}, // 1.1 [1] (❇️) sparkle - {0x274C, 0x274C, prExtendedPictographic}, // 6.0 [1] (❌) cross mark - {0x274E, 0x274E, prExtendedPictographic}, // 6.0 [1] (❎) cross mark button - {0x2753, 0x2755, prExtendedPictographic}, // 6.0 [3] (❓..❕) question mark..white exclamation mark - {0x2757, 0x2757, prExtendedPictographic}, // 5.2 [1] (❗) exclamation mark - {0x2763, 0x2767, prExtendedPictographic}, // 1.1 [5] (❣️..❧) heart exclamation..ROTATED FLORAL HEART BULLET - {0x2795, 0x2797, prExtendedPictographic}, // 6.0 [3] (➕..➗) plus sign..division sign - {0x27A1, 0x27A1, prExtendedPictographic}, // 1.1 [1] (➡️) right arrow - {0x27B0, 0x27B0, prExtendedPictographic}, // 6.0 [1] (➰) curly loop - {0x27BF, 0x27BF, prExtendedPictographic}, // 6.0 [1] (➿) double curly loop - {0x2934, 0x2935, prExtendedPictographic}, // 3.2 [2] (⤴️..⤵️) right arrow curving up..right arrow curving down - {0x2B05, 0x2B07, prExtendedPictographic}, // 4.0 [3] (⬅️..⬇️) left arrow..down arrow - {0x2B1B, 0x2B1C, prExtendedPictographic}, // 5.1 [2] (⬛..⬜) black large square..white large square - {0x2B50, 0x2B50, prExtendedPictographic}, // 5.1 [1] (⭐) star - {0x2B55, 0x2B55, prExtendedPictographic}, // 5.2 [1] (⭕) hollow red circle - {0x2CEF, 0x2CF1, prExtend}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS - {0x2D7F, 0x2D7F, prExtend}, // Mn TIFINAGH CONSONANT JOINER - {0x2DE0, 0x2DFF, prExtend}, // Mn [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS - {0x302A, 0x302D, prExtend}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK - {0x302E, 0x302F, prExtend}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK - {0x3030, 0x3030, prExtendedPictographic}, // 1.1 [1] (〰️) wavy dash - {0x303D, 0x303D, prExtendedPictographic}, // 3.2 [1] (〽️) part alternation mark - {0x3099, 0x309A, prExtend}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK - {0x3297, 0x3297, prExtendedPictographic}, // 1.1 [1] (㊗️) Japanese “congratulations” button - {0x3299, 0x3299, prExtendedPictographic}, // 1.1 [1] (㊙️) Japanese “secret” button - {0xA66F, 0xA66F, prExtend}, // Mn COMBINING CYRILLIC VZMET - {0xA670, 0xA672, prExtend}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN - {0xA674, 0xA67D, prExtend}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK - {0xA69E, 0xA69F, prExtend}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E - {0xA6F0, 0xA6F1, prExtend}, // Mn [2] BAMUM COMBINING MARK 
KOQNDON..BAMUM COMBINING MARK TUKWENTIS - {0xA802, 0xA802, prExtend}, // Mn SYLOTI NAGRI SIGN DVISVARA - {0xA806, 0xA806, prExtend}, // Mn SYLOTI NAGRI SIGN HASANTA - {0xA80B, 0xA80B, prExtend}, // Mn SYLOTI NAGRI SIGN ANUSVARA - {0xA823, 0xA824, prSpacingMark}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I - {0xA825, 0xA826, prExtend}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E - {0xA827, 0xA827, prSpacingMark}, // Mc SYLOTI NAGRI VOWEL SIGN OO - {0xA880, 0xA881, prSpacingMark}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA - {0xA8B4, 0xA8C3, prSpacingMark}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU - {0xA8C4, 0xA8C5, prExtend}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU - {0xA8E0, 0xA8F1, prExtend}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA - {0xA8FF, 0xA8FF, prExtend}, // Mn DEVANAGARI VOWEL SIGN AY - {0xA926, 0xA92D, prExtend}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU - {0xA947, 0xA951, prExtend}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R - {0xA952, 0xA953, prSpacingMark}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA - {0xA960, 0xA97C, prL}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH - {0xA980, 0xA982, prExtend}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR - {0xA983, 0xA983, prSpacingMark}, // Mc JAVANESE SIGN WIGNYAN - {0xA9B3, 0xA9B3, prExtend}, // Mn JAVANESE SIGN CECAK TELU - {0xA9B4, 0xA9B5, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG - {0xA9B6, 0xA9B9, prExtend}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT - {0xA9BA, 0xA9BB, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE - {0xA9BC, 0xA9BD, prExtend}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET - {0xA9BE, 0xA9C0, prSpacingMark}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON - {0xA9E5, 0xA9E5, prExtend}, // Mn MYANMAR SIGN SHAN SAW - {0xAA29, 0xAA2E, prExtend}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE - {0xAA2F, 0xAA30, prSpacingMark}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI - {0xAA31, 0xAA32, prExtend}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE - {0xAA33, 0xAA34, prSpacingMark}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA - {0xAA35, 0xAA36, prExtend}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA - {0xAA43, 0xAA43, prExtend}, // Mn CHAM CONSONANT SIGN FINAL NG - {0xAA4C, 0xAA4C, prExtend}, // Mn CHAM CONSONANT SIGN FINAL M - {0xAA4D, 0xAA4D, prSpacingMark}, // Mc CHAM CONSONANT SIGN FINAL H - {0xAA7C, 0xAA7C, prExtend}, // Mn MYANMAR SIGN TAI LAING TONE-2 - {0xAAB0, 0xAAB0, prExtend}, // Mn TAI VIET MAI KANG - {0xAAB2, 0xAAB4, prExtend}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U - {0xAAB7, 0xAAB8, prExtend}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA - {0xAABE, 0xAABF, prExtend}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK - {0xAAC1, 0xAAC1, prExtend}, // Mn TAI VIET TONE MAI THO - {0xAAEB, 0xAAEB, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN II - {0xAAEC, 0xAAED, prExtend}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI - {0xAAEE, 0xAAEF, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU - {0xAAF5, 0xAAF5, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA - {0xAAF6, 0xAAF6, prExtend}, // Mn MEETEI MAYEK VIRAMA - {0xABE3, 0xABE4, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN 
ONAP..MEETEI MAYEK VOWEL SIGN INAP - {0xABE5, 0xABE5, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN ANAP - {0xABE6, 0xABE7, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP - {0xABE8, 0xABE8, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN UNAP - {0xABE9, 0xABEA, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG - {0xABEC, 0xABEC, prSpacingMark}, // Mc MEETEI MAYEK LUM IYEK - {0xABED, 0xABED, prExtend}, // Mn MEETEI MAYEK APUN IYEK - {0xAC00, 0xAC00, prLV}, // Lo HANGUL SYLLABLE GA - {0xAC01, 0xAC1B, prLVT}, // Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH - {0xAC1C, 0xAC1C, prLV}, // Lo HANGUL SYLLABLE GAE - {0xAC1D, 0xAC37, prLVT}, // Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH - {0xAC38, 0xAC38, prLV}, // Lo HANGUL SYLLABLE GYA - {0xAC39, 0xAC53, prLVT}, // Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH - {0xAC54, 0xAC54, prLV}, // Lo HANGUL SYLLABLE GYAE - {0xAC55, 0xAC6F, prLVT}, // Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH - {0xAC70, 0xAC70, prLV}, // Lo HANGUL SYLLABLE GEO - {0xAC71, 0xAC8B, prLVT}, // Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH - {0xAC8C, 0xAC8C, prLV}, // Lo HANGUL SYLLABLE GE - {0xAC8D, 0xACA7, prLVT}, // Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH - {0xACA8, 0xACA8, prLV}, // Lo HANGUL SYLLABLE GYEO - {0xACA9, 0xACC3, prLVT}, // Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH - {0xACC4, 0xACC4, prLV}, // Lo HANGUL SYLLABLE GYE - {0xACC5, 0xACDF, prLVT}, // Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH - {0xACE0, 0xACE0, prLV}, // Lo HANGUL SYLLABLE GO - {0xACE1, 0xACFB, prLVT}, // Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH - {0xACFC, 0xACFC, prLV}, // Lo HANGUL SYLLABLE GWA - {0xACFD, 0xAD17, prLVT}, // Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH - {0xAD18, 0xAD18, prLV}, // Lo HANGUL SYLLABLE GWAE - {0xAD19, 0xAD33, prLVT}, // Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH - {0xAD34, 0xAD34, prLV}, // Lo HANGUL SYLLABLE GOE - {0xAD35, 0xAD4F, prLVT}, // Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH - {0xAD50, 0xAD50, prLV}, // Lo HANGUL SYLLABLE GYO - {0xAD51, 0xAD6B, prLVT}, // Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH - {0xAD6C, 0xAD6C, prLV}, // Lo HANGUL SYLLABLE GU - {0xAD6D, 0xAD87, prLVT}, // Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH - {0xAD88, 0xAD88, prLV}, // Lo HANGUL SYLLABLE GWEO - {0xAD89, 0xADA3, prLVT}, // Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH - {0xADA4, 0xADA4, prLV}, // Lo HANGUL SYLLABLE GWE - {0xADA5, 0xADBF, prLVT}, // Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH - {0xADC0, 0xADC0, prLV}, // Lo HANGUL SYLLABLE GWI - {0xADC1, 0xADDB, prLVT}, // Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH - {0xADDC, 0xADDC, prLV}, // Lo HANGUL SYLLABLE GYU - {0xADDD, 0xADF7, prLVT}, // Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH - {0xADF8, 0xADF8, prLV}, // Lo HANGUL SYLLABLE GEU - {0xADF9, 0xAE13, prLVT}, // Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH - {0xAE14, 0xAE14, prLV}, // Lo HANGUL SYLLABLE GYI - {0xAE15, 0xAE2F, prLVT}, // Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH - {0xAE30, 0xAE30, prLV}, // Lo HANGUL SYLLABLE GI - {0xAE31, 0xAE4B, prLVT}, // Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH - {0xAE4C, 0xAE4C, prLV}, // Lo HANGUL SYLLABLE GGA - {0xAE4D, 0xAE67, prLVT}, // Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH - {0xAE68, 0xAE68, prLV}, // Lo HANGUL SYLLABLE GGAE - {0xAE69, 0xAE83, prLVT}, // Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE 
GGAEH - {0xAE84, 0xAE84, prLV}, // Lo HANGUL SYLLABLE GGYA - {0xAE85, 0xAE9F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH - {0xAEA0, 0xAEA0, prLV}, // Lo HANGUL SYLLABLE GGYAE - {0xAEA1, 0xAEBB, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH - {0xAEBC, 0xAEBC, prLV}, // Lo HANGUL SYLLABLE GGEO - {0xAEBD, 0xAED7, prLVT}, // Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH - {0xAED8, 0xAED8, prLV}, // Lo HANGUL SYLLABLE GGE - {0xAED9, 0xAEF3, prLVT}, // Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH - {0xAEF4, 0xAEF4, prLV}, // Lo HANGUL SYLLABLE GGYEO - {0xAEF5, 0xAF0F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH - {0xAF10, 0xAF10, prLV}, // Lo HANGUL SYLLABLE GGYE - {0xAF11, 0xAF2B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH - {0xAF2C, 0xAF2C, prLV}, // Lo HANGUL SYLLABLE GGO - {0xAF2D, 0xAF47, prLVT}, // Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH - {0xAF48, 0xAF48, prLV}, // Lo HANGUL SYLLABLE GGWA - {0xAF49, 0xAF63, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH - {0xAF64, 0xAF64, prLV}, // Lo HANGUL SYLLABLE GGWAE - {0xAF65, 0xAF7F, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH - {0xAF80, 0xAF80, prLV}, // Lo HANGUL SYLLABLE GGOE - {0xAF81, 0xAF9B, prLVT}, // Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH - {0xAF9C, 0xAF9C, prLV}, // Lo HANGUL SYLLABLE GGYO - {0xAF9D, 0xAFB7, prLVT}, // Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH - {0xAFB8, 0xAFB8, prLV}, // Lo HANGUL SYLLABLE GGU - {0xAFB9, 0xAFD3, prLVT}, // Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH - {0xAFD4, 0xAFD4, prLV}, // Lo HANGUL SYLLABLE GGWEO - {0xAFD5, 0xAFEF, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH - {0xAFF0, 0xAFF0, prLV}, // Lo HANGUL SYLLABLE GGWE - {0xAFF1, 0xB00B, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH - {0xB00C, 0xB00C, prLV}, // Lo HANGUL SYLLABLE GGWI - {0xB00D, 0xB027, prLVT}, // Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH - {0xB028, 0xB028, prLV}, // Lo HANGUL SYLLABLE GGYU - {0xB029, 0xB043, prLVT}, // Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH - {0xB044, 0xB044, prLV}, // Lo HANGUL SYLLABLE GGEU - {0xB045, 0xB05F, prLVT}, // Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH - {0xB060, 0xB060, prLV}, // Lo HANGUL SYLLABLE GGYI - {0xB061, 0xB07B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH - {0xB07C, 0xB07C, prLV}, // Lo HANGUL SYLLABLE GGI - {0xB07D, 0xB097, prLVT}, // Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH - {0xB098, 0xB098, prLV}, // Lo HANGUL SYLLABLE NA - {0xB099, 0xB0B3, prLVT}, // Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH - {0xB0B4, 0xB0B4, prLV}, // Lo HANGUL SYLLABLE NAE - {0xB0B5, 0xB0CF, prLVT}, // Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH - {0xB0D0, 0xB0D0, prLV}, // Lo HANGUL SYLLABLE NYA - {0xB0D1, 0xB0EB, prLVT}, // Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH - {0xB0EC, 0xB0EC, prLV}, // Lo HANGUL SYLLABLE NYAE - {0xB0ED, 0xB107, prLVT}, // Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH - {0xB108, 0xB108, prLV}, // Lo HANGUL SYLLABLE NEO - {0xB109, 0xB123, prLVT}, // Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH - {0xB124, 0xB124, prLV}, // Lo HANGUL SYLLABLE NE - {0xB125, 0xB13F, prLVT}, // Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH - {0xB140, 0xB140, prLV}, // Lo HANGUL SYLLABLE NYEO - {0xB141, 0xB15B, prLVT}, // Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH - {0xB15C, 0xB15C, prLV}, // Lo 
HANGUL SYLLABLE NYE - {0xB15D, 0xB177, prLVT}, // Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH - {0xB178, 0xB178, prLV}, // Lo HANGUL SYLLABLE NO - {0xB179, 0xB193, prLVT}, // Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH - {0xB194, 0xB194, prLV}, // Lo HANGUL SYLLABLE NWA - {0xB195, 0xB1AF, prLVT}, // Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH - {0xB1B0, 0xB1B0, prLV}, // Lo HANGUL SYLLABLE NWAE - {0xB1B1, 0xB1CB, prLVT}, // Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH - {0xB1CC, 0xB1CC, prLV}, // Lo HANGUL SYLLABLE NOE - {0xB1CD, 0xB1E7, prLVT}, // Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH - {0xB1E8, 0xB1E8, prLV}, // Lo HANGUL SYLLABLE NYO - {0xB1E9, 0xB203, prLVT}, // Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH - {0xB204, 0xB204, prLV}, // Lo HANGUL SYLLABLE NU - {0xB205, 0xB21F, prLVT}, // Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH - {0xB220, 0xB220, prLV}, // Lo HANGUL SYLLABLE NWEO - {0xB221, 0xB23B, prLVT}, // Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH - {0xB23C, 0xB23C, prLV}, // Lo HANGUL SYLLABLE NWE - {0xB23D, 0xB257, prLVT}, // Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH - {0xB258, 0xB258, prLV}, // Lo HANGUL SYLLABLE NWI - {0xB259, 0xB273, prLVT}, // Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH - {0xB274, 0xB274, prLV}, // Lo HANGUL SYLLABLE NYU - {0xB275, 0xB28F, prLVT}, // Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH - {0xB290, 0xB290, prLV}, // Lo HANGUL SYLLABLE NEU - {0xB291, 0xB2AB, prLVT}, // Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH - {0xB2AC, 0xB2AC, prLV}, // Lo HANGUL SYLLABLE NYI - {0xB2AD, 0xB2C7, prLVT}, // Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH - {0xB2C8, 0xB2C8, prLV}, // Lo HANGUL SYLLABLE NI - {0xB2C9, 0xB2E3, prLVT}, // Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH - {0xB2E4, 0xB2E4, prLV}, // Lo HANGUL SYLLABLE DA - {0xB2E5, 0xB2FF, prLVT}, // Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH - {0xB300, 0xB300, prLV}, // Lo HANGUL SYLLABLE DAE - {0xB301, 0xB31B, prLVT}, // Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH - {0xB31C, 0xB31C, prLV}, // Lo HANGUL SYLLABLE DYA - {0xB31D, 0xB337, prLVT}, // Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH - {0xB338, 0xB338, prLV}, // Lo HANGUL SYLLABLE DYAE - {0xB339, 0xB353, prLVT}, // Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH - {0xB354, 0xB354, prLV}, // Lo HANGUL SYLLABLE DEO - {0xB355, 0xB36F, prLVT}, // Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH - {0xB370, 0xB370, prLV}, // Lo HANGUL SYLLABLE DE - {0xB371, 0xB38B, prLVT}, // Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH - {0xB38C, 0xB38C, prLV}, // Lo HANGUL SYLLABLE DYEO - {0xB38D, 0xB3A7, prLVT}, // Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH - {0xB3A8, 0xB3A8, prLV}, // Lo HANGUL SYLLABLE DYE - {0xB3A9, 0xB3C3, prLVT}, // Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH - {0xB3C4, 0xB3C4, prLV}, // Lo HANGUL SYLLABLE DO - {0xB3C5, 0xB3DF, prLVT}, // Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH - {0xB3E0, 0xB3E0, prLV}, // Lo HANGUL SYLLABLE DWA - {0xB3E1, 0xB3FB, prLVT}, // Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH - {0xB3FC, 0xB3FC, prLV}, // Lo HANGUL SYLLABLE DWAE - {0xB3FD, 0xB417, prLVT}, // Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH - {0xB418, 0xB418, prLV}, // Lo HANGUL SYLLABLE DOE - {0xB419, 0xB433, prLVT}, // Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH - {0xB434, 0xB434, prLV}, // Lo HANGUL SYLLABLE DYO - {0xB435, 0xB44F, prLVT}, // Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE 
DYOH - {0xB450, 0xB450, prLV}, // Lo HANGUL SYLLABLE DU - {0xB451, 0xB46B, prLVT}, // Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH - {0xB46C, 0xB46C, prLV}, // Lo HANGUL SYLLABLE DWEO - {0xB46D, 0xB487, prLVT}, // Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH - {0xB488, 0xB488, prLV}, // Lo HANGUL SYLLABLE DWE - {0xB489, 0xB4A3, prLVT}, // Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH - {0xB4A4, 0xB4A4, prLV}, // Lo HANGUL SYLLABLE DWI - {0xB4A5, 0xB4BF, prLVT}, // Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH - {0xB4C0, 0xB4C0, prLV}, // Lo HANGUL SYLLABLE DYU - {0xB4C1, 0xB4DB, prLVT}, // Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH - {0xB4DC, 0xB4DC, prLV}, // Lo HANGUL SYLLABLE DEU - {0xB4DD, 0xB4F7, prLVT}, // Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH - {0xB4F8, 0xB4F8, prLV}, // Lo HANGUL SYLLABLE DYI - {0xB4F9, 0xB513, prLVT}, // Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH - {0xB514, 0xB514, prLV}, // Lo HANGUL SYLLABLE DI - {0xB515, 0xB52F, prLVT}, // Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH - {0xB530, 0xB530, prLV}, // Lo HANGUL SYLLABLE DDA - {0xB531, 0xB54B, prLVT}, // Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH - {0xB54C, 0xB54C, prLV}, // Lo HANGUL SYLLABLE DDAE - {0xB54D, 0xB567, prLVT}, // Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH - {0xB568, 0xB568, prLV}, // Lo HANGUL SYLLABLE DDYA - {0xB569, 0xB583, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH - {0xB584, 0xB584, prLV}, // Lo HANGUL SYLLABLE DDYAE - {0xB585, 0xB59F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH - {0xB5A0, 0xB5A0, prLV}, // Lo HANGUL SYLLABLE DDEO - {0xB5A1, 0xB5BB, prLVT}, // Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH - {0xB5BC, 0xB5BC, prLV}, // Lo HANGUL SYLLABLE DDE - {0xB5BD, 0xB5D7, prLVT}, // Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH - {0xB5D8, 0xB5D8, prLV}, // Lo HANGUL SYLLABLE DDYEO - {0xB5D9, 0xB5F3, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH - {0xB5F4, 0xB5F4, prLV}, // Lo HANGUL SYLLABLE DDYE - {0xB5F5, 0xB60F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH - {0xB610, 0xB610, prLV}, // Lo HANGUL SYLLABLE DDO - {0xB611, 0xB62B, prLVT}, // Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH - {0xB62C, 0xB62C, prLV}, // Lo HANGUL SYLLABLE DDWA - {0xB62D, 0xB647, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH - {0xB648, 0xB648, prLV}, // Lo HANGUL SYLLABLE DDWAE - {0xB649, 0xB663, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH - {0xB664, 0xB664, prLV}, // Lo HANGUL SYLLABLE DDOE - {0xB665, 0xB67F, prLVT}, // Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH - {0xB680, 0xB680, prLV}, // Lo HANGUL SYLLABLE DDYO - {0xB681, 0xB69B, prLVT}, // Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH - {0xB69C, 0xB69C, prLV}, // Lo HANGUL SYLLABLE DDU - {0xB69D, 0xB6B7, prLVT}, // Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH - {0xB6B8, 0xB6B8, prLV}, // Lo HANGUL SYLLABLE DDWEO - {0xB6B9, 0xB6D3, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH - {0xB6D4, 0xB6D4, prLV}, // Lo HANGUL SYLLABLE DDWE - {0xB6D5, 0xB6EF, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH - {0xB6F0, 0xB6F0, prLV}, // Lo HANGUL SYLLABLE DDWI - {0xB6F1, 0xB70B, prLVT}, // Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH - {0xB70C, 0xB70C, prLV}, // Lo HANGUL SYLLABLE DDYU - {0xB70D, 0xB727, prLVT}, // Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH - {0xB728, 0xB728, prLV}, // Lo HANGUL 
SYLLABLE DDEU - {0xB729, 0xB743, prLVT}, // Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH - {0xB744, 0xB744, prLV}, // Lo HANGUL SYLLABLE DDYI - {0xB745, 0xB75F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH - {0xB760, 0xB760, prLV}, // Lo HANGUL SYLLABLE DDI - {0xB761, 0xB77B, prLVT}, // Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH - {0xB77C, 0xB77C, prLV}, // Lo HANGUL SYLLABLE RA - {0xB77D, 0xB797, prLVT}, // Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH - {0xB798, 0xB798, prLV}, // Lo HANGUL SYLLABLE RAE - {0xB799, 0xB7B3, prLVT}, // Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH - {0xB7B4, 0xB7B4, prLV}, // Lo HANGUL SYLLABLE RYA - {0xB7B5, 0xB7CF, prLVT}, // Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH - {0xB7D0, 0xB7D0, prLV}, // Lo HANGUL SYLLABLE RYAE - {0xB7D1, 0xB7EB, prLVT}, // Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH - {0xB7EC, 0xB7EC, prLV}, // Lo HANGUL SYLLABLE REO - {0xB7ED, 0xB807, prLVT}, // Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH - {0xB808, 0xB808, prLV}, // Lo HANGUL SYLLABLE RE - {0xB809, 0xB823, prLVT}, // Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH - {0xB824, 0xB824, prLV}, // Lo HANGUL SYLLABLE RYEO - {0xB825, 0xB83F, prLVT}, // Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH - {0xB840, 0xB840, prLV}, // Lo HANGUL SYLLABLE RYE - {0xB841, 0xB85B, prLVT}, // Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH - {0xB85C, 0xB85C, prLV}, // Lo HANGUL SYLLABLE RO - {0xB85D, 0xB877, prLVT}, // Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH - {0xB878, 0xB878, prLV}, // Lo HANGUL SYLLABLE RWA - {0xB879, 0xB893, prLVT}, // Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH - {0xB894, 0xB894, prLV}, // Lo HANGUL SYLLABLE RWAE - {0xB895, 0xB8AF, prLVT}, // Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH - {0xB8B0, 0xB8B0, prLV}, // Lo HANGUL SYLLABLE ROE - {0xB8B1, 0xB8CB, prLVT}, // Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH - {0xB8CC, 0xB8CC, prLV}, // Lo HANGUL SYLLABLE RYO - {0xB8CD, 0xB8E7, prLVT}, // Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH - {0xB8E8, 0xB8E8, prLV}, // Lo HANGUL SYLLABLE RU - {0xB8E9, 0xB903, prLVT}, // Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH - {0xB904, 0xB904, prLV}, // Lo HANGUL SYLLABLE RWEO - {0xB905, 0xB91F, prLVT}, // Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH - {0xB920, 0xB920, prLV}, // Lo HANGUL SYLLABLE RWE - {0xB921, 0xB93B, prLVT}, // Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH - {0xB93C, 0xB93C, prLV}, // Lo HANGUL SYLLABLE RWI - {0xB93D, 0xB957, prLVT}, // Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH - {0xB958, 0xB958, prLV}, // Lo HANGUL SYLLABLE RYU - {0xB959, 0xB973, prLVT}, // Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH - {0xB974, 0xB974, prLV}, // Lo HANGUL SYLLABLE REU - {0xB975, 0xB98F, prLVT}, // Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH - {0xB990, 0xB990, prLV}, // Lo HANGUL SYLLABLE RYI - {0xB991, 0xB9AB, prLVT}, // Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH - {0xB9AC, 0xB9AC, prLV}, // Lo HANGUL SYLLABLE RI - {0xB9AD, 0xB9C7, prLVT}, // Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH - {0xB9C8, 0xB9C8, prLV}, // Lo HANGUL SYLLABLE MA - {0xB9C9, 0xB9E3, prLVT}, // Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH - {0xB9E4, 0xB9E4, prLV}, // Lo HANGUL SYLLABLE MAE - {0xB9E5, 0xB9FF, prLVT}, // Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH - {0xBA00, 0xBA00, prLV}, // Lo HANGUL SYLLABLE MYA - {0xBA01, 0xBA1B, prLVT}, // Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH - 
{0xBA1C, 0xBA1C, prLV}, // Lo HANGUL SYLLABLE MYAE - {0xBA1D, 0xBA37, prLVT}, // Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH - {0xBA38, 0xBA38, prLV}, // Lo HANGUL SYLLABLE MEO - {0xBA39, 0xBA53, prLVT}, // Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH - {0xBA54, 0xBA54, prLV}, // Lo HANGUL SYLLABLE ME - {0xBA55, 0xBA6F, prLVT}, // Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH - {0xBA70, 0xBA70, prLV}, // Lo HANGUL SYLLABLE MYEO - {0xBA71, 0xBA8B, prLVT}, // Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH - {0xBA8C, 0xBA8C, prLV}, // Lo HANGUL SYLLABLE MYE - {0xBA8D, 0xBAA7, prLVT}, // Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH - {0xBAA8, 0xBAA8, prLV}, // Lo HANGUL SYLLABLE MO - {0xBAA9, 0xBAC3, prLVT}, // Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH - {0xBAC4, 0xBAC4, prLV}, // Lo HANGUL SYLLABLE MWA - {0xBAC5, 0xBADF, prLVT}, // Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH - {0xBAE0, 0xBAE0, prLV}, // Lo HANGUL SYLLABLE MWAE - {0xBAE1, 0xBAFB, prLVT}, // Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH - {0xBAFC, 0xBAFC, prLV}, // Lo HANGUL SYLLABLE MOE - {0xBAFD, 0xBB17, prLVT}, // Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH - {0xBB18, 0xBB18, prLV}, // Lo HANGUL SYLLABLE MYO - {0xBB19, 0xBB33, prLVT}, // Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH - {0xBB34, 0xBB34, prLV}, // Lo HANGUL SYLLABLE MU - {0xBB35, 0xBB4F, prLVT}, // Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH - {0xBB50, 0xBB50, prLV}, // Lo HANGUL SYLLABLE MWEO - {0xBB51, 0xBB6B, prLVT}, // Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH - {0xBB6C, 0xBB6C, prLV}, // Lo HANGUL SYLLABLE MWE - {0xBB6D, 0xBB87, prLVT}, // Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH - {0xBB88, 0xBB88, prLV}, // Lo HANGUL SYLLABLE MWI - {0xBB89, 0xBBA3, prLVT}, // Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH - {0xBBA4, 0xBBA4, prLV}, // Lo HANGUL SYLLABLE MYU - {0xBBA5, 0xBBBF, prLVT}, // Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH - {0xBBC0, 0xBBC0, prLV}, // Lo HANGUL SYLLABLE MEU - {0xBBC1, 0xBBDB, prLVT}, // Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH - {0xBBDC, 0xBBDC, prLV}, // Lo HANGUL SYLLABLE MYI - {0xBBDD, 0xBBF7, prLVT}, // Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH - {0xBBF8, 0xBBF8, prLV}, // Lo HANGUL SYLLABLE MI - {0xBBF9, 0xBC13, prLVT}, // Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH - {0xBC14, 0xBC14, prLV}, // Lo HANGUL SYLLABLE BA - {0xBC15, 0xBC2F, prLVT}, // Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH - {0xBC30, 0xBC30, prLV}, // Lo HANGUL SYLLABLE BAE - {0xBC31, 0xBC4B, prLVT}, // Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH - {0xBC4C, 0xBC4C, prLV}, // Lo HANGUL SYLLABLE BYA - {0xBC4D, 0xBC67, prLVT}, // Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH - {0xBC68, 0xBC68, prLV}, // Lo HANGUL SYLLABLE BYAE - {0xBC69, 0xBC83, prLVT}, // Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH - {0xBC84, 0xBC84, prLV}, // Lo HANGUL SYLLABLE BEO - {0xBC85, 0xBC9F, prLVT}, // Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH - {0xBCA0, 0xBCA0, prLV}, // Lo HANGUL SYLLABLE BE - {0xBCA1, 0xBCBB, prLVT}, // Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH - {0xBCBC, 0xBCBC, prLV}, // Lo HANGUL SYLLABLE BYEO - {0xBCBD, 0xBCD7, prLVT}, // Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH - {0xBCD8, 0xBCD8, prLV}, // Lo HANGUL SYLLABLE BYE - {0xBCD9, 0xBCF3, prLVT}, // Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH - {0xBCF4, 0xBCF4, prLV}, // Lo HANGUL SYLLABLE BO - {0xBCF5, 0xBD0F, prLVT}, // Lo [27] HANGUL 
SYLLABLE BOG..HANGUL SYLLABLE BOH - {0xBD10, 0xBD10, prLV}, // Lo HANGUL SYLLABLE BWA - {0xBD11, 0xBD2B, prLVT}, // Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH - {0xBD2C, 0xBD2C, prLV}, // Lo HANGUL SYLLABLE BWAE - {0xBD2D, 0xBD47, prLVT}, // Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH - {0xBD48, 0xBD48, prLV}, // Lo HANGUL SYLLABLE BOE - {0xBD49, 0xBD63, prLVT}, // Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH - {0xBD64, 0xBD64, prLV}, // Lo HANGUL SYLLABLE BYO - {0xBD65, 0xBD7F, prLVT}, // Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH - {0xBD80, 0xBD80, prLV}, // Lo HANGUL SYLLABLE BU - {0xBD81, 0xBD9B, prLVT}, // Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH - {0xBD9C, 0xBD9C, prLV}, // Lo HANGUL SYLLABLE BWEO - {0xBD9D, 0xBDB7, prLVT}, // Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH - {0xBDB8, 0xBDB8, prLV}, // Lo HANGUL SYLLABLE BWE - {0xBDB9, 0xBDD3, prLVT}, // Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH - {0xBDD4, 0xBDD4, prLV}, // Lo HANGUL SYLLABLE BWI - {0xBDD5, 0xBDEF, prLVT}, // Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH - {0xBDF0, 0xBDF0, prLV}, // Lo HANGUL SYLLABLE BYU - {0xBDF1, 0xBE0B, prLVT}, // Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH - {0xBE0C, 0xBE0C, prLV}, // Lo HANGUL SYLLABLE BEU - {0xBE0D, 0xBE27, prLVT}, // Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH - {0xBE28, 0xBE28, prLV}, // Lo HANGUL SYLLABLE BYI - {0xBE29, 0xBE43, prLVT}, // Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH - {0xBE44, 0xBE44, prLV}, // Lo HANGUL SYLLABLE BI - {0xBE45, 0xBE5F, prLVT}, // Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH - {0xBE60, 0xBE60, prLV}, // Lo HANGUL SYLLABLE BBA - {0xBE61, 0xBE7B, prLVT}, // Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH - {0xBE7C, 0xBE7C, prLV}, // Lo HANGUL SYLLABLE BBAE - {0xBE7D, 0xBE97, prLVT}, // Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH - {0xBE98, 0xBE98, prLV}, // Lo HANGUL SYLLABLE BBYA - {0xBE99, 0xBEB3, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH - {0xBEB4, 0xBEB4, prLV}, // Lo HANGUL SYLLABLE BBYAE - {0xBEB5, 0xBECF, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH - {0xBED0, 0xBED0, prLV}, // Lo HANGUL SYLLABLE BBEO - {0xBED1, 0xBEEB, prLVT}, // Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH - {0xBEEC, 0xBEEC, prLV}, // Lo HANGUL SYLLABLE BBE - {0xBEED, 0xBF07, prLVT}, // Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH - {0xBF08, 0xBF08, prLV}, // Lo HANGUL SYLLABLE BBYEO - {0xBF09, 0xBF23, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH - {0xBF24, 0xBF24, prLV}, // Lo HANGUL SYLLABLE BBYE - {0xBF25, 0xBF3F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH - {0xBF40, 0xBF40, prLV}, // Lo HANGUL SYLLABLE BBO - {0xBF41, 0xBF5B, prLVT}, // Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH - {0xBF5C, 0xBF5C, prLV}, // Lo HANGUL SYLLABLE BBWA - {0xBF5D, 0xBF77, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH - {0xBF78, 0xBF78, prLV}, // Lo HANGUL SYLLABLE BBWAE - {0xBF79, 0xBF93, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH - {0xBF94, 0xBF94, prLV}, // Lo HANGUL SYLLABLE BBOE - {0xBF95, 0xBFAF, prLVT}, // Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH - {0xBFB0, 0xBFB0, prLV}, // Lo HANGUL SYLLABLE BBYO - {0xBFB1, 0xBFCB, prLVT}, // Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH - {0xBFCC, 0xBFCC, prLV}, // Lo HANGUL SYLLABLE BBU - {0xBFCD, 0xBFE7, prLVT}, // Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH - {0xBFE8, 0xBFE8, 
prLV}, // Lo HANGUL SYLLABLE BBWEO - {0xBFE9, 0xC003, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH - {0xC004, 0xC004, prLV}, // Lo HANGUL SYLLABLE BBWE - {0xC005, 0xC01F, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH - {0xC020, 0xC020, prLV}, // Lo HANGUL SYLLABLE BBWI - {0xC021, 0xC03B, prLVT}, // Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH - {0xC03C, 0xC03C, prLV}, // Lo HANGUL SYLLABLE BBYU - {0xC03D, 0xC057, prLVT}, // Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH - {0xC058, 0xC058, prLV}, // Lo HANGUL SYLLABLE BBEU - {0xC059, 0xC073, prLVT}, // Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH - {0xC074, 0xC074, prLV}, // Lo HANGUL SYLLABLE BBYI - {0xC075, 0xC08F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH - {0xC090, 0xC090, prLV}, // Lo HANGUL SYLLABLE BBI - {0xC091, 0xC0AB, prLVT}, // Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH - {0xC0AC, 0xC0AC, prLV}, // Lo HANGUL SYLLABLE SA - {0xC0AD, 0xC0C7, prLVT}, // Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH - {0xC0C8, 0xC0C8, prLV}, // Lo HANGUL SYLLABLE SAE - {0xC0C9, 0xC0E3, prLVT}, // Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH - {0xC0E4, 0xC0E4, prLV}, // Lo HANGUL SYLLABLE SYA - {0xC0E5, 0xC0FF, prLVT}, // Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH - {0xC100, 0xC100, prLV}, // Lo HANGUL SYLLABLE SYAE - {0xC101, 0xC11B, prLVT}, // Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH - {0xC11C, 0xC11C, prLV}, // Lo HANGUL SYLLABLE SEO - {0xC11D, 0xC137, prLVT}, // Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH - {0xC138, 0xC138, prLV}, // Lo HANGUL SYLLABLE SE - {0xC139, 0xC153, prLVT}, // Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH - {0xC154, 0xC154, prLV}, // Lo HANGUL SYLLABLE SYEO - {0xC155, 0xC16F, prLVT}, // Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH - {0xC170, 0xC170, prLV}, // Lo HANGUL SYLLABLE SYE - {0xC171, 0xC18B, prLVT}, // Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH - {0xC18C, 0xC18C, prLV}, // Lo HANGUL SYLLABLE SO - {0xC18D, 0xC1A7, prLVT}, // Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH - {0xC1A8, 0xC1A8, prLV}, // Lo HANGUL SYLLABLE SWA - {0xC1A9, 0xC1C3, prLVT}, // Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH - {0xC1C4, 0xC1C4, prLV}, // Lo HANGUL SYLLABLE SWAE - {0xC1C5, 0xC1DF, prLVT}, // Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH - {0xC1E0, 0xC1E0, prLV}, // Lo HANGUL SYLLABLE SOE - {0xC1E1, 0xC1FB, prLVT}, // Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH - {0xC1FC, 0xC1FC, prLV}, // Lo HANGUL SYLLABLE SYO - {0xC1FD, 0xC217, prLVT}, // Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH - {0xC218, 0xC218, prLV}, // Lo HANGUL SYLLABLE SU - {0xC219, 0xC233, prLVT}, // Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH - {0xC234, 0xC234, prLV}, // Lo HANGUL SYLLABLE SWEO - {0xC235, 0xC24F, prLVT}, // Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH - {0xC250, 0xC250, prLV}, // Lo HANGUL SYLLABLE SWE - {0xC251, 0xC26B, prLVT}, // Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH - {0xC26C, 0xC26C, prLV}, // Lo HANGUL SYLLABLE SWI - {0xC26D, 0xC287, prLVT}, // Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH - {0xC288, 0xC288, prLV}, // Lo HANGUL SYLLABLE SYU - {0xC289, 0xC2A3, prLVT}, // Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH - {0xC2A4, 0xC2A4, prLV}, // Lo HANGUL SYLLABLE SEU - {0xC2A5, 0xC2BF, prLVT}, // Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH - {0xC2C0, 0xC2C0, prLV}, // Lo HANGUL SYLLABLE SYI - {0xC2C1, 0xC2DB, prLVT}, // Lo [27] 
HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH - {0xC2DC, 0xC2DC, prLV}, // Lo HANGUL SYLLABLE SI - {0xC2DD, 0xC2F7, prLVT}, // Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH - {0xC2F8, 0xC2F8, prLV}, // Lo HANGUL SYLLABLE SSA - {0xC2F9, 0xC313, prLVT}, // Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH - {0xC314, 0xC314, prLV}, // Lo HANGUL SYLLABLE SSAE - {0xC315, 0xC32F, prLVT}, // Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH - {0xC330, 0xC330, prLV}, // Lo HANGUL SYLLABLE SSYA - {0xC331, 0xC34B, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH - {0xC34C, 0xC34C, prLV}, // Lo HANGUL SYLLABLE SSYAE - {0xC34D, 0xC367, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH - {0xC368, 0xC368, prLV}, // Lo HANGUL SYLLABLE SSEO - {0xC369, 0xC383, prLVT}, // Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH - {0xC384, 0xC384, prLV}, // Lo HANGUL SYLLABLE SSE - {0xC385, 0xC39F, prLVT}, // Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH - {0xC3A0, 0xC3A0, prLV}, // Lo HANGUL SYLLABLE SSYEO - {0xC3A1, 0xC3BB, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH - {0xC3BC, 0xC3BC, prLV}, // Lo HANGUL SYLLABLE SSYE - {0xC3BD, 0xC3D7, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH - {0xC3D8, 0xC3D8, prLV}, // Lo HANGUL SYLLABLE SSO - {0xC3D9, 0xC3F3, prLVT}, // Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH - {0xC3F4, 0xC3F4, prLV}, // Lo HANGUL SYLLABLE SSWA - {0xC3F5, 0xC40F, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH - {0xC410, 0xC410, prLV}, // Lo HANGUL SYLLABLE SSWAE - {0xC411, 0xC42B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH - {0xC42C, 0xC42C, prLV}, // Lo HANGUL SYLLABLE SSOE - {0xC42D, 0xC447, prLVT}, // Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH - {0xC448, 0xC448, prLV}, // Lo HANGUL SYLLABLE SSYO - {0xC449, 0xC463, prLVT}, // Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH - {0xC464, 0xC464, prLV}, // Lo HANGUL SYLLABLE SSU - {0xC465, 0xC47F, prLVT}, // Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH - {0xC480, 0xC480, prLV}, // Lo HANGUL SYLLABLE SSWEO - {0xC481, 0xC49B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH - {0xC49C, 0xC49C, prLV}, // Lo HANGUL SYLLABLE SSWE - {0xC49D, 0xC4B7, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH - {0xC4B8, 0xC4B8, prLV}, // Lo HANGUL SYLLABLE SSWI - {0xC4B9, 0xC4D3, prLVT}, // Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH - {0xC4D4, 0xC4D4, prLV}, // Lo HANGUL SYLLABLE SSYU - {0xC4D5, 0xC4EF, prLVT}, // Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH - {0xC4F0, 0xC4F0, prLV}, // Lo HANGUL SYLLABLE SSEU - {0xC4F1, 0xC50B, prLVT}, // Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH - {0xC50C, 0xC50C, prLV}, // Lo HANGUL SYLLABLE SSYI - {0xC50D, 0xC527, prLVT}, // Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH - {0xC528, 0xC528, prLV}, // Lo HANGUL SYLLABLE SSI - {0xC529, 0xC543, prLVT}, // Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH - {0xC544, 0xC544, prLV}, // Lo HANGUL SYLLABLE A - {0xC545, 0xC55F, prLVT}, // Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH - {0xC560, 0xC560, prLV}, // Lo HANGUL SYLLABLE AE - {0xC561, 0xC57B, prLVT}, // Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH - {0xC57C, 0xC57C, prLV}, // Lo HANGUL SYLLABLE YA - {0xC57D, 0xC597, prLVT}, // Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH - {0xC598, 0xC598, prLV}, // Lo HANGUL SYLLABLE YAE - {0xC599, 0xC5B3, prLVT}, // Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH - 
{0xC5B4, 0xC5B4, prLV}, // Lo HANGUL SYLLABLE EO - {0xC5B5, 0xC5CF, prLVT}, // Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH - {0xC5D0, 0xC5D0, prLV}, // Lo HANGUL SYLLABLE E - {0xC5D1, 0xC5EB, prLVT}, // Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH - {0xC5EC, 0xC5EC, prLV}, // Lo HANGUL SYLLABLE YEO - {0xC5ED, 0xC607, prLVT}, // Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH - {0xC608, 0xC608, prLV}, // Lo HANGUL SYLLABLE YE - {0xC609, 0xC623, prLVT}, // Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH - {0xC624, 0xC624, prLV}, // Lo HANGUL SYLLABLE O - {0xC625, 0xC63F, prLVT}, // Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH - {0xC640, 0xC640, prLV}, // Lo HANGUL SYLLABLE WA - {0xC641, 0xC65B, prLVT}, // Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH - {0xC65C, 0xC65C, prLV}, // Lo HANGUL SYLLABLE WAE - {0xC65D, 0xC677, prLVT}, // Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH - {0xC678, 0xC678, prLV}, // Lo HANGUL SYLLABLE OE - {0xC679, 0xC693, prLVT}, // Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH - {0xC694, 0xC694, prLV}, // Lo HANGUL SYLLABLE YO - {0xC695, 0xC6AF, prLVT}, // Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH - {0xC6B0, 0xC6B0, prLV}, // Lo HANGUL SYLLABLE U - {0xC6B1, 0xC6CB, prLVT}, // Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH - {0xC6CC, 0xC6CC, prLV}, // Lo HANGUL SYLLABLE WEO - {0xC6CD, 0xC6E7, prLVT}, // Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH - {0xC6E8, 0xC6E8, prLV}, // Lo HANGUL SYLLABLE WE - {0xC6E9, 0xC703, prLVT}, // Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH - {0xC704, 0xC704, prLV}, // Lo HANGUL SYLLABLE WI - {0xC705, 0xC71F, prLVT}, // Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH - {0xC720, 0xC720, prLV}, // Lo HANGUL SYLLABLE YU - {0xC721, 0xC73B, prLVT}, // Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH - {0xC73C, 0xC73C, prLV}, // Lo HANGUL SYLLABLE EU - {0xC73D, 0xC757, prLVT}, // Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH - {0xC758, 0xC758, prLV}, // Lo HANGUL SYLLABLE YI - {0xC759, 0xC773, prLVT}, // Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH - {0xC774, 0xC774, prLV}, // Lo HANGUL SYLLABLE I - {0xC775, 0xC78F, prLVT}, // Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH - {0xC790, 0xC790, prLV}, // Lo HANGUL SYLLABLE JA - {0xC791, 0xC7AB, prLVT}, // Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH - {0xC7AC, 0xC7AC, prLV}, // Lo HANGUL SYLLABLE JAE - {0xC7AD, 0xC7C7, prLVT}, // Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH - {0xC7C8, 0xC7C8, prLV}, // Lo HANGUL SYLLABLE JYA - {0xC7C9, 0xC7E3, prLVT}, // Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH - {0xC7E4, 0xC7E4, prLV}, // Lo HANGUL SYLLABLE JYAE - {0xC7E5, 0xC7FF, prLVT}, // Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH - {0xC800, 0xC800, prLV}, // Lo HANGUL SYLLABLE JEO - {0xC801, 0xC81B, prLVT}, // Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH - {0xC81C, 0xC81C, prLV}, // Lo HANGUL SYLLABLE JE - {0xC81D, 0xC837, prLVT}, // Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH - {0xC838, 0xC838, prLV}, // Lo HANGUL SYLLABLE JYEO - {0xC839, 0xC853, prLVT}, // Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH - {0xC854, 0xC854, prLV}, // Lo HANGUL SYLLABLE JYE - {0xC855, 0xC86F, prLVT}, // Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH - {0xC870, 0xC870, prLV}, // Lo HANGUL SYLLABLE JO - {0xC871, 0xC88B, prLVT}, // Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH - {0xC88C, 0xC88C, prLV}, // Lo HANGUL SYLLABLE JWA - {0xC88D, 0xC8A7, prLVT}, // Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH - {0xC8A8, 0xC8A8, 
prLV}, // Lo HANGUL SYLLABLE JWAE - {0xC8A9, 0xC8C3, prLVT}, // Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH - {0xC8C4, 0xC8C4, prLV}, // Lo HANGUL SYLLABLE JOE - {0xC8C5, 0xC8DF, prLVT}, // Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH - {0xC8E0, 0xC8E0, prLV}, // Lo HANGUL SYLLABLE JYO - {0xC8E1, 0xC8FB, prLVT}, // Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH - {0xC8FC, 0xC8FC, prLV}, // Lo HANGUL SYLLABLE JU - {0xC8FD, 0xC917, prLVT}, // Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH - {0xC918, 0xC918, prLV}, // Lo HANGUL SYLLABLE JWEO - {0xC919, 0xC933, prLVT}, // Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH - {0xC934, 0xC934, prLV}, // Lo HANGUL SYLLABLE JWE - {0xC935, 0xC94F, prLVT}, // Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH - {0xC950, 0xC950, prLV}, // Lo HANGUL SYLLABLE JWI - {0xC951, 0xC96B, prLVT}, // Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH - {0xC96C, 0xC96C, prLV}, // Lo HANGUL SYLLABLE JYU - {0xC96D, 0xC987, prLVT}, // Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH - {0xC988, 0xC988, prLV}, // Lo HANGUL SYLLABLE JEU - {0xC989, 0xC9A3, prLVT}, // Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH - {0xC9A4, 0xC9A4, prLV}, // Lo HANGUL SYLLABLE JYI - {0xC9A5, 0xC9BF, prLVT}, // Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH - {0xC9C0, 0xC9C0, prLV}, // Lo HANGUL SYLLABLE JI - {0xC9C1, 0xC9DB, prLVT}, // Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH - {0xC9DC, 0xC9DC, prLV}, // Lo HANGUL SYLLABLE JJA - {0xC9DD, 0xC9F7, prLVT}, // Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH - {0xC9F8, 0xC9F8, prLV}, // Lo HANGUL SYLLABLE JJAE - {0xC9F9, 0xCA13, prLVT}, // Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH - {0xCA14, 0xCA14, prLV}, // Lo HANGUL SYLLABLE JJYA - {0xCA15, 0xCA2F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH - {0xCA30, 0xCA30, prLV}, // Lo HANGUL SYLLABLE JJYAE - {0xCA31, 0xCA4B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH - {0xCA4C, 0xCA4C, prLV}, // Lo HANGUL SYLLABLE JJEO - {0xCA4D, 0xCA67, prLVT}, // Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH - {0xCA68, 0xCA68, prLV}, // Lo HANGUL SYLLABLE JJE - {0xCA69, 0xCA83, prLVT}, // Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH - {0xCA84, 0xCA84, prLV}, // Lo HANGUL SYLLABLE JJYEO - {0xCA85, 0xCA9F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH - {0xCAA0, 0xCAA0, prLV}, // Lo HANGUL SYLLABLE JJYE - {0xCAA1, 0xCABB, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH - {0xCABC, 0xCABC, prLV}, // Lo HANGUL SYLLABLE JJO - {0xCABD, 0xCAD7, prLVT}, // Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH - {0xCAD8, 0xCAD8, prLV}, // Lo HANGUL SYLLABLE JJWA - {0xCAD9, 0xCAF3, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH - {0xCAF4, 0xCAF4, prLV}, // Lo HANGUL SYLLABLE JJWAE - {0xCAF5, 0xCB0F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH - {0xCB10, 0xCB10, prLV}, // Lo HANGUL SYLLABLE JJOE - {0xCB11, 0xCB2B, prLVT}, // Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH - {0xCB2C, 0xCB2C, prLV}, // Lo HANGUL SYLLABLE JJYO - {0xCB2D, 0xCB47, prLVT}, // Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH - {0xCB48, 0xCB48, prLV}, // Lo HANGUL SYLLABLE JJU - {0xCB49, 0xCB63, prLVT}, // Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH - {0xCB64, 0xCB64, prLV}, // Lo HANGUL SYLLABLE JJWEO - {0xCB65, 0xCB7F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH - {0xCB80, 0xCB80, prLV}, // Lo HANGUL SYLLABLE JJWE - {0xCB81, 
0xCB9B, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH - {0xCB9C, 0xCB9C, prLV}, // Lo HANGUL SYLLABLE JJWI - {0xCB9D, 0xCBB7, prLVT}, // Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH - {0xCBB8, 0xCBB8, prLV}, // Lo HANGUL SYLLABLE JJYU - {0xCBB9, 0xCBD3, prLVT}, // Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH - {0xCBD4, 0xCBD4, prLV}, // Lo HANGUL SYLLABLE JJEU - {0xCBD5, 0xCBEF, prLVT}, // Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH - {0xCBF0, 0xCBF0, prLV}, // Lo HANGUL SYLLABLE JJYI - {0xCBF1, 0xCC0B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH - {0xCC0C, 0xCC0C, prLV}, // Lo HANGUL SYLLABLE JJI - {0xCC0D, 0xCC27, prLVT}, // Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH - {0xCC28, 0xCC28, prLV}, // Lo HANGUL SYLLABLE CA - {0xCC29, 0xCC43, prLVT}, // Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH - {0xCC44, 0xCC44, prLV}, // Lo HANGUL SYLLABLE CAE - {0xCC45, 0xCC5F, prLVT}, // Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH - {0xCC60, 0xCC60, prLV}, // Lo HANGUL SYLLABLE CYA - {0xCC61, 0xCC7B, prLVT}, // Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH - {0xCC7C, 0xCC7C, prLV}, // Lo HANGUL SYLLABLE CYAE - {0xCC7D, 0xCC97, prLVT}, // Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH - {0xCC98, 0xCC98, prLV}, // Lo HANGUL SYLLABLE CEO - {0xCC99, 0xCCB3, prLVT}, // Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH - {0xCCB4, 0xCCB4, prLV}, // Lo HANGUL SYLLABLE CE - {0xCCB5, 0xCCCF, prLVT}, // Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH - {0xCCD0, 0xCCD0, prLV}, // Lo HANGUL SYLLABLE CYEO - {0xCCD1, 0xCCEB, prLVT}, // Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH - {0xCCEC, 0xCCEC, prLV}, // Lo HANGUL SYLLABLE CYE - {0xCCED, 0xCD07, prLVT}, // Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH - {0xCD08, 0xCD08, prLV}, // Lo HANGUL SYLLABLE CO - {0xCD09, 0xCD23, prLVT}, // Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH - {0xCD24, 0xCD24, prLV}, // Lo HANGUL SYLLABLE CWA - {0xCD25, 0xCD3F, prLVT}, // Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH - {0xCD40, 0xCD40, prLV}, // Lo HANGUL SYLLABLE CWAE - {0xCD41, 0xCD5B, prLVT}, // Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH - {0xCD5C, 0xCD5C, prLV}, // Lo HANGUL SYLLABLE COE - {0xCD5D, 0xCD77, prLVT}, // Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH - {0xCD78, 0xCD78, prLV}, // Lo HANGUL SYLLABLE CYO - {0xCD79, 0xCD93, prLVT}, // Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH - {0xCD94, 0xCD94, prLV}, // Lo HANGUL SYLLABLE CU - {0xCD95, 0xCDAF, prLVT}, // Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH - {0xCDB0, 0xCDB0, prLV}, // Lo HANGUL SYLLABLE CWEO - {0xCDB1, 0xCDCB, prLVT}, // Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH - {0xCDCC, 0xCDCC, prLV}, // Lo HANGUL SYLLABLE CWE - {0xCDCD, 0xCDE7, prLVT}, // Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH - {0xCDE8, 0xCDE8, prLV}, // Lo HANGUL SYLLABLE CWI - {0xCDE9, 0xCE03, prLVT}, // Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH - {0xCE04, 0xCE04, prLV}, // Lo HANGUL SYLLABLE CYU - {0xCE05, 0xCE1F, prLVT}, // Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH - {0xCE20, 0xCE20, prLV}, // Lo HANGUL SYLLABLE CEU - {0xCE21, 0xCE3B, prLVT}, // Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH - {0xCE3C, 0xCE3C, prLV}, // Lo HANGUL SYLLABLE CYI - {0xCE3D, 0xCE57, prLVT}, // Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH - {0xCE58, 0xCE58, prLV}, // Lo HANGUL SYLLABLE CI - {0xCE59, 0xCE73, prLVT}, // Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH - {0xCE74, 
0xCE74, prLV}, // Lo HANGUL SYLLABLE KA - {0xCE75, 0xCE8F, prLVT}, // Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH - {0xCE90, 0xCE90, prLV}, // Lo HANGUL SYLLABLE KAE - {0xCE91, 0xCEAB, prLVT}, // Lo [27] HANGUL SYLLABLE KAEG..HANGUL SYLLABLE KAEH - {0xCEAC, 0xCEAC, prLV}, // Lo HANGUL SYLLABLE KYA - {0xCEAD, 0xCEC7, prLVT}, // Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH - {0xCEC8, 0xCEC8, prLV}, // Lo HANGUL SYLLABLE KYAE - {0xCEC9, 0xCEE3, prLVT}, // Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH - {0xCEE4, 0xCEE4, prLV}, // Lo HANGUL SYLLABLE KEO - {0xCEE5, 0xCEFF, prLVT}, // Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH - {0xCF00, 0xCF00, prLV}, // Lo HANGUL SYLLABLE KE - {0xCF01, 0xCF1B, prLVT}, // Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH - {0xCF1C, 0xCF1C, prLV}, // Lo HANGUL SYLLABLE KYEO - {0xCF1D, 0xCF37, prLVT}, // Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH - {0xCF38, 0xCF38, prLV}, // Lo HANGUL SYLLABLE KYE - {0xCF39, 0xCF53, prLVT}, // Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH - {0xCF54, 0xCF54, prLV}, // Lo HANGUL SYLLABLE KO - {0xCF55, 0xCF6F, prLVT}, // Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH - {0xCF70, 0xCF70, prLV}, // Lo HANGUL SYLLABLE KWA - {0xCF71, 0xCF8B, prLVT}, // Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH - {0xCF8C, 0xCF8C, prLV}, // Lo HANGUL SYLLABLE KWAE - {0xCF8D, 0xCFA7, prLVT}, // Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH - {0xCFA8, 0xCFA8, prLV}, // Lo HANGUL SYLLABLE KOE - {0xCFA9, 0xCFC3, prLVT}, // Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH - {0xCFC4, 0xCFC4, prLV}, // Lo HANGUL SYLLABLE KYO - {0xCFC5, 0xCFDF, prLVT}, // Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH - {0xCFE0, 0xCFE0, prLV}, // Lo HANGUL SYLLABLE KU - {0xCFE1, 0xCFFB, prLVT}, // Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH - {0xCFFC, 0xCFFC, prLV}, // Lo HANGUL SYLLABLE KWEO - {0xCFFD, 0xD017, prLVT}, // Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH - {0xD018, 0xD018, prLV}, // Lo HANGUL SYLLABLE KWE - {0xD019, 0xD033, prLVT}, // Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH - {0xD034, 0xD034, prLV}, // Lo HANGUL SYLLABLE KWI - {0xD035, 0xD04F, prLVT}, // Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH - {0xD050, 0xD050, prLV}, // Lo HANGUL SYLLABLE KYU - {0xD051, 0xD06B, prLVT}, // Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH - {0xD06C, 0xD06C, prLV}, // Lo HANGUL SYLLABLE KEU - {0xD06D, 0xD087, prLVT}, // Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH - {0xD088, 0xD088, prLV}, // Lo HANGUL SYLLABLE KYI - {0xD089, 0xD0A3, prLVT}, // Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH - {0xD0A4, 0xD0A4, prLV}, // Lo HANGUL SYLLABLE KI - {0xD0A5, 0xD0BF, prLVT}, // Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH - {0xD0C0, 0xD0C0, prLV}, // Lo HANGUL SYLLABLE TA - {0xD0C1, 0xD0DB, prLVT}, // Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH - {0xD0DC, 0xD0DC, prLV}, // Lo HANGUL SYLLABLE TAE - {0xD0DD, 0xD0F7, prLVT}, // Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH - {0xD0F8, 0xD0F8, prLV}, // Lo HANGUL SYLLABLE TYA - {0xD0F9, 0xD113, prLVT}, // Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH - {0xD114, 0xD114, prLV}, // Lo HANGUL SYLLABLE TYAE - {0xD115, 0xD12F, prLVT}, // Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH - {0xD130, 0xD130, prLV}, // Lo HANGUL SYLLABLE TEO - {0xD131, 0xD14B, prLVT}, // Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH - {0xD14C, 0xD14C, prLV}, // Lo HANGUL SYLLABLE TE - {0xD14D, 0xD167, prLVT}, // Lo [27] HANGUL SYLLABLE 
TEG..HANGUL SYLLABLE TEH - {0xD168, 0xD168, prLV}, // Lo HANGUL SYLLABLE TYEO - {0xD169, 0xD183, prLVT}, // Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH - {0xD184, 0xD184, prLV}, // Lo HANGUL SYLLABLE TYE - {0xD185, 0xD19F, prLVT}, // Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH - {0xD1A0, 0xD1A0, prLV}, // Lo HANGUL SYLLABLE TO - {0xD1A1, 0xD1BB, prLVT}, // Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH - {0xD1BC, 0xD1BC, prLV}, // Lo HANGUL SYLLABLE TWA - {0xD1BD, 0xD1D7, prLVT}, // Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH - {0xD1D8, 0xD1D8, prLV}, // Lo HANGUL SYLLABLE TWAE - {0xD1D9, 0xD1F3, prLVT}, // Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH - {0xD1F4, 0xD1F4, prLV}, // Lo HANGUL SYLLABLE TOE - {0xD1F5, 0xD20F, prLVT}, // Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH - {0xD210, 0xD210, prLV}, // Lo HANGUL SYLLABLE TYO - {0xD211, 0xD22B, prLVT}, // Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH - {0xD22C, 0xD22C, prLV}, // Lo HANGUL SYLLABLE TU - {0xD22D, 0xD247, prLVT}, // Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH - {0xD248, 0xD248, prLV}, // Lo HANGUL SYLLABLE TWEO - {0xD249, 0xD263, prLVT}, // Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH - {0xD264, 0xD264, prLV}, // Lo HANGUL SYLLABLE TWE - {0xD265, 0xD27F, prLVT}, // Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH - {0xD280, 0xD280, prLV}, // Lo HANGUL SYLLABLE TWI - {0xD281, 0xD29B, prLVT}, // Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH - {0xD29C, 0xD29C, prLV}, // Lo HANGUL SYLLABLE TYU - {0xD29D, 0xD2B7, prLVT}, // Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH - {0xD2B8, 0xD2B8, prLV}, // Lo HANGUL SYLLABLE TEU - {0xD2B9, 0xD2D3, prLVT}, // Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH - {0xD2D4, 0xD2D4, prLV}, // Lo HANGUL SYLLABLE TYI - {0xD2D5, 0xD2EF, prLVT}, // Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH - {0xD2F0, 0xD2F0, prLV}, // Lo HANGUL SYLLABLE TI - {0xD2F1, 0xD30B, prLVT}, // Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH - {0xD30C, 0xD30C, prLV}, // Lo HANGUL SYLLABLE PA - {0xD30D, 0xD327, prLVT}, // Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH - {0xD328, 0xD328, prLV}, // Lo HANGUL SYLLABLE PAE - {0xD329, 0xD343, prLVT}, // Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH - {0xD344, 0xD344, prLV}, // Lo HANGUL SYLLABLE PYA - {0xD345, 0xD35F, prLVT}, // Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH - {0xD360, 0xD360, prLV}, // Lo HANGUL SYLLABLE PYAE - {0xD361, 0xD37B, prLVT}, // Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH - {0xD37C, 0xD37C, prLV}, // Lo HANGUL SYLLABLE PEO - {0xD37D, 0xD397, prLVT}, // Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH - {0xD398, 0xD398, prLV}, // Lo HANGUL SYLLABLE PE - {0xD399, 0xD3B3, prLVT}, // Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH - {0xD3B4, 0xD3B4, prLV}, // Lo HANGUL SYLLABLE PYEO - {0xD3B5, 0xD3CF, prLVT}, // Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH - {0xD3D0, 0xD3D0, prLV}, // Lo HANGUL SYLLABLE PYE - {0xD3D1, 0xD3EB, prLVT}, // Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH - {0xD3EC, 0xD3EC, prLV}, // Lo HANGUL SYLLABLE PO - {0xD3ED, 0xD407, prLVT}, // Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH - {0xD408, 0xD408, prLV}, // Lo HANGUL SYLLABLE PWA - {0xD409, 0xD423, prLVT}, // Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH - {0xD424, 0xD424, prLV}, // Lo HANGUL SYLLABLE PWAE - {0xD425, 0xD43F, prLVT}, // Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH - {0xD440, 0xD440, prLV}, // Lo HANGUL SYLLABLE POE - {0xD441, 0xD45B, 
prLVT}, // Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH - {0xD45C, 0xD45C, prLV}, // Lo HANGUL SYLLABLE PYO - {0xD45D, 0xD477, prLVT}, // Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH - {0xD478, 0xD478, prLV}, // Lo HANGUL SYLLABLE PU - {0xD479, 0xD493, prLVT}, // Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH - {0xD494, 0xD494, prLV}, // Lo HANGUL SYLLABLE PWEO - {0xD495, 0xD4AF, prLVT}, // Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH - {0xD4B0, 0xD4B0, prLV}, // Lo HANGUL SYLLABLE PWE - {0xD4B1, 0xD4CB, prLVT}, // Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH - {0xD4CC, 0xD4CC, prLV}, // Lo HANGUL SYLLABLE PWI - {0xD4CD, 0xD4E7, prLVT}, // Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH - {0xD4E8, 0xD4E8, prLV}, // Lo HANGUL SYLLABLE PYU - {0xD4E9, 0xD503, prLVT}, // Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH - {0xD504, 0xD504, prLV}, // Lo HANGUL SYLLABLE PEU - {0xD505, 0xD51F, prLVT}, // Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH - {0xD520, 0xD520, prLV}, // Lo HANGUL SYLLABLE PYI - {0xD521, 0xD53B, prLVT}, // Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH - {0xD53C, 0xD53C, prLV}, // Lo HANGUL SYLLABLE PI - {0xD53D, 0xD557, prLVT}, // Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH - {0xD558, 0xD558, prLV}, // Lo HANGUL SYLLABLE HA - {0xD559, 0xD573, prLVT}, // Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH - {0xD574, 0xD574, prLV}, // Lo HANGUL SYLLABLE HAE - {0xD575, 0xD58F, prLVT}, // Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH - {0xD590, 0xD590, prLV}, // Lo HANGUL SYLLABLE HYA - {0xD591, 0xD5AB, prLVT}, // Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH - {0xD5AC, 0xD5AC, prLV}, // Lo HANGUL SYLLABLE HYAE - {0xD5AD, 0xD5C7, prLVT}, // Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH - {0xD5C8, 0xD5C8, prLV}, // Lo HANGUL SYLLABLE HEO - {0xD5C9, 0xD5E3, prLVT}, // Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH - {0xD5E4, 0xD5E4, prLV}, // Lo HANGUL SYLLABLE HE - {0xD5E5, 0xD5FF, prLVT}, // Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH - {0xD600, 0xD600, prLV}, // Lo HANGUL SYLLABLE HYEO - {0xD601, 0xD61B, prLVT}, // Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH - {0xD61C, 0xD61C, prLV}, // Lo HANGUL SYLLABLE HYE - {0xD61D, 0xD637, prLVT}, // Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH - {0xD638, 0xD638, prLV}, // Lo HANGUL SYLLABLE HO - {0xD639, 0xD653, prLVT}, // Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH - {0xD654, 0xD654, prLV}, // Lo HANGUL SYLLABLE HWA - {0xD655, 0xD66F, prLVT}, // Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH - {0xD670, 0xD670, prLV}, // Lo HANGUL SYLLABLE HWAE - {0xD671, 0xD68B, prLVT}, // Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH - {0xD68C, 0xD68C, prLV}, // Lo HANGUL SYLLABLE HOE - {0xD68D, 0xD6A7, prLVT}, // Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH - {0xD6A8, 0xD6A8, prLV}, // Lo HANGUL SYLLABLE HYO - {0xD6A9, 0xD6C3, prLVT}, // Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH - {0xD6C4, 0xD6C4, prLV}, // Lo HANGUL SYLLABLE HU - {0xD6C5, 0xD6DF, prLVT}, // Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH - {0xD6E0, 0xD6E0, prLV}, // Lo HANGUL SYLLABLE HWEO - {0xD6E1, 0xD6FB, prLVT}, // Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH - {0xD6FC, 0xD6FC, prLV}, // Lo HANGUL SYLLABLE HWE - {0xD6FD, 0xD717, prLVT}, // Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH - {0xD718, 0xD718, prLV}, // Lo HANGUL SYLLABLE HWI - {0xD719, 0xD733, prLVT}, // Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH - {0xD734, 0xD734, prLV}, // Lo 
HANGUL SYLLABLE HYU - {0xD735, 0xD74F, prLVT}, // Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH - {0xD750, 0xD750, prLV}, // Lo HANGUL SYLLABLE HEU - {0xD751, 0xD76B, prLVT}, // Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH - {0xD76C, 0xD76C, prLV}, // Lo HANGUL SYLLABLE HYI - {0xD76D, 0xD787, prLVT}, // Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH - {0xD788, 0xD788, prLV}, // Lo HANGUL SYLLABLE HI - {0xD789, 0xD7A3, prLVT}, // Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH - {0xD7B0, 0xD7C6, prV}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E - {0xD7CB, 0xD7FB, prT}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH - {0xFB1E, 0xFB1E, prExtend}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA - {0xFE00, 0xFE0F, prExtend}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 - {0xFE20, 0xFE2F, prExtend}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF - {0xFEFF, 0xFEFF, prControl}, // Cf ZERO WIDTH NO-BREAK SPACE - {0xFF9E, 0xFF9F, prExtend}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK - {0xFFF0, 0xFFF8, prControl}, // Cn [9] .. - {0xFFF9, 0xFFFB, prControl}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR - {0x101FD, 0x101FD, prExtend}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE - {0x102E0, 0x102E0, prExtend}, // Mn COPTIC EPACT THOUSANDS MARK - {0x10376, 0x1037A, prExtend}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII - {0x10A01, 0x10A03, prExtend}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R - {0x10A05, 0x10A06, prExtend}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O - {0x10A0C, 0x10A0F, prExtend}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA - {0x10A38, 0x10A3A, prExtend}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW - {0x10A3F, 0x10A3F, prExtend}, // Mn KHAROSHTHI VIRAMA - {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW - {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI - {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW - {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU - {0x11001, 0x11001, prExtend}, // Mn BRAHMI SIGN ANUSVARA - {0x11002, 0x11002, prSpacingMark}, // Mc BRAHMI SIGN VISARGA - {0x11038, 0x11046, prExtend}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA - {0x1107F, 0x11081, prExtend}, // Mn [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA - {0x11082, 0x11082, prSpacingMark}, // Mc KAITHI SIGN VISARGA - {0x110B0, 0x110B2, prSpacingMark}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II - {0x110B3, 0x110B6, prExtend}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI - {0x110B7, 0x110B8, prSpacingMark}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU - {0x110B9, 0x110BA, prExtend}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA - {0x110BD, 0x110BD, prPreprend}, // Cf KAITHI NUMBER SIGN - {0x110CD, 0x110CD, prPreprend}, // Cf KAITHI NUMBER SIGN ABOVE - {0x11100, 0x11102, prExtend}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA - {0x11127, 0x1112B, prExtend}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU - {0x1112C, 0x1112C, prSpacingMark}, // Mc CHAKMA VOWEL SIGN E - {0x1112D, 0x11134, prExtend}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA - {0x11145, 0x11146, prSpacingMark}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA 
VOWEL SIGN EI - {0x11173, 0x11173, prExtend}, // Mn MAHAJANI SIGN NUKTA - {0x11180, 0x11181, prExtend}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA - {0x11182, 0x11182, prSpacingMark}, // Mc SHARADA SIGN VISARGA - {0x111B3, 0x111B5, prSpacingMark}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II - {0x111B6, 0x111BE, prExtend}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O - {0x111BF, 0x111C0, prSpacingMark}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA - {0x111C2, 0x111C3, prPreprend}, // Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA - {0x111C9, 0x111CC, prExtend}, // Mn [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK - {0x1122C, 0x1122E, prSpacingMark}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II - {0x1122F, 0x11231, prExtend}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI - {0x11232, 0x11233, prSpacingMark}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU - {0x11234, 0x11234, prExtend}, // Mn KHOJKI SIGN ANUSVARA - {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA - {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA - {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN - {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA - {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II - {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA - {0x11300, 0x11301, prExtend}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU - {0x11302, 0x11303, prSpacingMark}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA - {0x1133B, 0x1133C, prExtend}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA - {0x1133E, 0x1133E, prExtend}, // Mc GRANTHA VOWEL SIGN AA - {0x1133F, 0x1133F, prSpacingMark}, // Mc GRANTHA VOWEL SIGN I - {0x11340, 0x11340, prExtend}, // Mn GRANTHA VOWEL SIGN II - {0x11341, 0x11344, prSpacingMark}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR - {0x11347, 0x11348, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI - {0x1134B, 0x1134D, prSpacingMark}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA - {0x11357, 0x11357, prExtend}, // Mc GRANTHA AU LENGTH MARK - {0x11362, 0x11363, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL - {0x11366, 0x1136C, prExtend}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX - {0x11370, 0x11374, prExtend}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA - {0x11435, 0x11437, prSpacingMark}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II - {0x11438, 0x1143F, prExtend}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI - {0x11440, 0x11441, prSpacingMark}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU - {0x11442, 0x11444, prExtend}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA - {0x11445, 0x11445, prSpacingMark}, // Mc NEWA SIGN VISARGA - {0x11446, 0x11446, prExtend}, // Mn NEWA SIGN NUKTA - {0x1145E, 0x1145E, prExtend}, // Mn NEWA SANDHI MARK - {0x114B0, 0x114B0, prExtend}, // Mc TIRHUTA VOWEL SIGN AA - {0x114B1, 0x114B2, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II - {0x114B3, 0x114B8, prExtend}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL - {0x114B9, 0x114B9, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN E - {0x114BA, 0x114BA, prExtend}, // Mn TIRHUTA VOWEL SIGN SHORT E - {0x114BB, 0x114BC, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O - {0x114BD, 0x114BD, 
prExtend}, // Mc TIRHUTA VOWEL SIGN SHORT O - {0x114BE, 0x114BE, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN AU - {0x114BF, 0x114C0, prExtend}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA - {0x114C1, 0x114C1, prSpacingMark}, // Mc TIRHUTA SIGN VISARGA - {0x114C2, 0x114C3, prExtend}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA - {0x115AF, 0x115AF, prExtend}, // Mc SIDDHAM VOWEL SIGN AA - {0x115B0, 0x115B1, prSpacingMark}, // Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II - {0x115B2, 0x115B5, prExtend}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR - {0x115B8, 0x115BB, prSpacingMark}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU - {0x115BC, 0x115BD, prExtend}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA - {0x115BE, 0x115BE, prSpacingMark}, // Mc SIDDHAM SIGN VISARGA - {0x115BF, 0x115C0, prExtend}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA - {0x115DC, 0x115DD, prExtend}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU - {0x11630, 0x11632, prSpacingMark}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II - {0x11633, 0x1163A, prExtend}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI - {0x1163B, 0x1163C, prSpacingMark}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU - {0x1163D, 0x1163D, prExtend}, // Mn MODI SIGN ANUSVARA - {0x1163E, 0x1163E, prSpacingMark}, // Mc MODI SIGN VISARGA - {0x1163F, 0x11640, prExtend}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA - {0x116AB, 0x116AB, prExtend}, // Mn TAKRI SIGN ANUSVARA - {0x116AC, 0x116AC, prSpacingMark}, // Mc TAKRI SIGN VISARGA - {0x116AD, 0x116AD, prExtend}, // Mn TAKRI VOWEL SIGN AA - {0x116AE, 0x116AF, prSpacingMark}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II - {0x116B0, 0x116B5, prExtend}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU - {0x116B6, 0x116B6, prSpacingMark}, // Mc TAKRI SIGN VIRAMA - {0x116B7, 0x116B7, prExtend}, // Mn TAKRI SIGN NUKTA - {0x1171D, 0x1171F, prExtend}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA - {0x11720, 0x11721, prSpacingMark}, // Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA - {0x11722, 0x11725, prExtend}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU - {0x11726, 0x11726, prSpacingMark}, // Mc AHOM VOWEL SIGN E - {0x11727, 0x1172B, prExtend}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER - {0x1182C, 0x1182E, prSpacingMark}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II - {0x1182F, 0x11837, prExtend}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA - {0x11838, 0x11838, prSpacingMark}, // Mc DOGRA SIGN VISARGA - {0x11839, 0x1183A, prExtend}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA - {0x119D1, 0x119D3, prSpacingMark}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II - {0x119D4, 0x119D7, prExtend}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR - {0x119DA, 0x119DB, prExtend}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI - {0x119DC, 0x119DF, prSpacingMark}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA - {0x119E0, 0x119E0, prExtend}, // Mn NANDINAGARI SIGN VIRAMA - {0x119E4, 0x119E4, prSpacingMark}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E - {0x11A01, 0x11A0A, prExtend}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK - {0x11A33, 0x11A38, prExtend}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA - {0x11A39, 0x11A39, prSpacingMark}, // Mc ZANABAZAR SQUARE SIGN VISARGA - {0x11A3A, 0x11A3A, prPreprend}, // Lo ZANABAZAR SQUARE 
CLUSTER-INITIAL LETTER RA - {0x11A3B, 0x11A3E, prExtend}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA - {0x11A47, 0x11A47, prExtend}, // Mn ZANABAZAR SQUARE SUBJOINER - {0x11A51, 0x11A56, prExtend}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE - {0x11A57, 0x11A58, prSpacingMark}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU - {0x11A59, 0x11A5B, prExtend}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK - {0x11A84, 0x11A89, prPreprend}, // Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA - {0x11A8A, 0x11A96, prExtend}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA - {0x11A97, 0x11A97, prSpacingMark}, // Mc SOYOMBO SIGN VISARGA - {0x11A98, 0x11A99, prExtend}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER - {0x11C2F, 0x11C2F, prSpacingMark}, // Mc BHAIKSUKI VOWEL SIGN AA - {0x11C30, 0x11C36, prExtend}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L - {0x11C38, 0x11C3D, prExtend}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA - {0x11C3E, 0x11C3E, prSpacingMark}, // Mc BHAIKSUKI SIGN VISARGA - {0x11C3F, 0x11C3F, prExtend}, // Mn BHAIKSUKI SIGN VIRAMA - {0x11C92, 0x11CA7, prExtend}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA - {0x11CA9, 0x11CA9, prSpacingMark}, // Mc MARCHEN SUBJOINED LETTER YA - {0x11CAA, 0x11CB0, prExtend}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA - {0x11CB1, 0x11CB1, prSpacingMark}, // Mc MARCHEN VOWEL SIGN I - {0x11CB2, 0x11CB3, prExtend}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E - {0x11CB4, 0x11CB4, prSpacingMark}, // Mc MARCHEN VOWEL SIGN O - {0x11CB5, 0x11CB6, prExtend}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU - {0x11D31, 0x11D36, prExtend}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R - {0x11D3A, 0x11D3A, prExtend}, // Mn MASARAM GONDI VOWEL SIGN E - {0x11D3C, 0x11D3D, prExtend}, // Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O - {0x11D3F, 0x11D45, prExtend}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA - {0x11D46, 0x11D46, prPreprend}, // Lo MASARAM GONDI REPHA - {0x11D47, 0x11D47, prExtend}, // Mn MASARAM GONDI RA-KARA - {0x11D8A, 0x11D8E, prSpacingMark}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU - {0x11D90, 0x11D91, prExtend}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI - {0x11D93, 0x11D94, prSpacingMark}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU - {0x11D95, 0x11D95, prExtend}, // Mn GUNJALA GONDI SIGN ANUSVARA - {0x11D96, 0x11D96, prSpacingMark}, // Mc GUNJALA GONDI SIGN VISARGA - {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA - {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U - {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O - {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT - {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE - {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM - {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR - {0x16F51, 0x16F87, prSpacingMark}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI - {0x16F8F, 0x16F92, prExtend}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW - {0x1BC9D, 0x1BC9E, prExtend}, // Mn [2] DUPLOYAN THICK 
LETTER SELECTOR..DUPLOYAN DOUBLE MARK - {0x1BCA0, 0x1BCA3, prControl}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP - {0x1D165, 0x1D165, prExtend}, // Mc MUSICAL SYMBOL COMBINING STEM - {0x1D166, 0x1D166, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM - {0x1D167, 0x1D169, prExtend}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3 - {0x1D16D, 0x1D16D, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING AUGMENTATION DOT - {0x1D16E, 0x1D172, prExtend}, // Mc [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5 - {0x1D173, 0x1D17A, prControl}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE - {0x1D17B, 0x1D182, prExtend}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE - {0x1D185, 0x1D18B, prExtend}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE - {0x1D1AA, 0x1D1AD, prExtend}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO - {0x1D242, 0x1D244, prExtend}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME - {0x1DA00, 0x1DA36, prExtend}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN - {0x1DA3B, 0x1DA6C, prExtend}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT - {0x1DA75, 0x1DA75, prExtend}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS - {0x1DA84, 0x1DA84, prExtend}, // Mn SIGNWRITING LOCATION HEAD NECK - {0x1DA9B, 0x1DA9F, prExtend}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6 - {0x1DAA1, 0x1DAAF, prExtend}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16 - {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE - {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU - {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI - {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS - {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA - {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D - {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI - {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS - {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA - {0x1F000, 0x1F02B, prExtendedPictographic}, // 5.1 [44] (🀀..🀫) MAHJONG TILE EAST WIND..MAHJONG TILE BACK - {0x1F02C, 0x1F02F, prExtendedPictographic}, // NA [4] (🀬..🀯) .. - {0x1F030, 0x1F093, prExtendedPictographic}, // 5.1[100] (🀰..🂓) DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06 - {0x1F094, 0x1F09F, prExtendedPictographic}, // NA [12] (🂔..🂟) .. - {0x1F0A0, 0x1F0AE, prExtendedPictographic}, // 6.0 [15] (🂠..🂮) PLAYING CARD BACK..PLAYING CARD KING OF SPADES - {0x1F0AF, 0x1F0B0, prExtendedPictographic}, // NA [2] (🂯..🂰) .. 
- {0x1F0B1, 0x1F0BE, prExtendedPictographic}, // 6.0 [14] (🂱..🂾) PLAYING CARD ACE OF HEARTS..PLAYING CARD KING OF HEARTS - {0x1F0BF, 0x1F0BF, prExtendedPictographic}, // 7.0 [1] (🂿) PLAYING CARD RED JOKER - {0x1F0C0, 0x1F0C0, prExtendedPictographic}, // NA [1] (🃀) - {0x1F0C1, 0x1F0CF, prExtendedPictographic}, // 6.0 [15] (🃁..🃏) PLAYING CARD ACE OF DIAMONDS..joker - {0x1F0D0, 0x1F0D0, prExtendedPictographic}, // NA [1] (🃐) - {0x1F0D1, 0x1F0DF, prExtendedPictographic}, // 6.0 [15] (🃑..🃟) PLAYING CARD ACE OF CLUBS..PLAYING CARD WHITE JOKER - {0x1F0E0, 0x1F0F5, prExtendedPictographic}, // 7.0 [22] (🃠..🃵) PLAYING CARD FOOL..PLAYING CARD TRUMP-21 - {0x1F0F6, 0x1F0FF, prExtendedPictographic}, // NA [10] (🃶..🃿) .. - {0x1F10D, 0x1F10F, prExtendedPictographic}, // NA [3] (🄍..🄏) .. - {0x1F12F, 0x1F12F, prExtendedPictographic}, // 11.0 [1] (🄯) COPYLEFT SYMBOL - {0x1F16C, 0x1F16C, prExtendedPictographic}, // 12.0 [1] (🅬) RAISED MR SIGN - {0x1F16D, 0x1F16F, prExtendedPictographic}, // NA [3] (🅭..🅯) .. - {0x1F170, 0x1F171, prExtendedPictographic}, // 6.0 [2] (🅰️..🅱️) A button (blood type)..B button (blood type) - {0x1F17E, 0x1F17E, prExtendedPictographic}, // 6.0 [1] (🅾️) O button (blood type) - {0x1F17F, 0x1F17F, prExtendedPictographic}, // 5.2 [1] (🅿️) P button - {0x1F18E, 0x1F18E, prExtendedPictographic}, // 6.0 [1] (🆎) AB button (blood type) - {0x1F191, 0x1F19A, prExtendedPictographic}, // 6.0 [10] (🆑..🆚) CL button..VS button - {0x1F1AD, 0x1F1E5, prExtendedPictographic}, // NA [57] (🆭..🇥) .. - {0x1F1E6, 0x1F1FF, prRegionalIndicator}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z - {0x1F201, 0x1F202, prExtendedPictographic}, // 6.0 [2] (🈁..🈂️) Japanese “here” button..Japanese “service charge” button - {0x1F203, 0x1F20F, prExtendedPictographic}, // NA [13] (🈃..🈏) .. - {0x1F21A, 0x1F21A, prExtendedPictographic}, // 5.2 [1] (🈚) Japanese “free of charge” button - {0x1F22F, 0x1F22F, prExtendedPictographic}, // 5.2 [1] (🈯) Japanese “reserved” button - {0x1F232, 0x1F23A, prExtendedPictographic}, // 6.0 [9] (🈲..🈺) Japanese “prohibited” button..Japanese “open for business” button - {0x1F23C, 0x1F23F, prExtendedPictographic}, // NA [4] (🈼..🈿) .. - {0x1F249, 0x1F24F, prExtendedPictographic}, // NA [7] (🉉..🉏) .. - {0x1F250, 0x1F251, prExtendedPictographic}, // 6.0 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button - {0x1F252, 0x1F25F, prExtendedPictographic}, // NA [14] (🉒..🉟) .. - {0x1F260, 0x1F265, prExtendedPictographic}, // 10.0 [6] (🉠..🉥) ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI - {0x1F266, 0x1F2FF, prExtendedPictographic}, // NA[154] (🉦..🋿) .. 
- {0x1F300, 0x1F320, prExtendedPictographic}, // 6.0 [33] (🌀..🌠) cyclone..shooting star - {0x1F321, 0x1F32C, prExtendedPictographic}, // 7.0 [12] (🌡️..🌬️) thermometer..wind face - {0x1F32D, 0x1F32F, prExtendedPictographic}, // 8.0 [3] (🌭..🌯) hot dog..burrito - {0x1F330, 0x1F335, prExtendedPictographic}, // 6.0 [6] (🌰..🌵) chestnut..cactus - {0x1F336, 0x1F336, prExtendedPictographic}, // 7.0 [1] (🌶️) hot pepper - {0x1F337, 0x1F37C, prExtendedPictographic}, // 6.0 [70] (🌷..🍼) tulip..baby bottle - {0x1F37D, 0x1F37D, prExtendedPictographic}, // 7.0 [1] (🍽️) fork and knife with plate - {0x1F37E, 0x1F37F, prExtendedPictographic}, // 8.0 [2] (🍾..🍿) bottle with popping cork..popcorn - {0x1F380, 0x1F393, prExtendedPictographic}, // 6.0 [20] (🎀..🎓) ribbon..graduation cap - {0x1F394, 0x1F39F, prExtendedPictographic}, // 7.0 [12] (🎔..🎟️) HEART WITH TIP ON THE LEFT..admission tickets - {0x1F3A0, 0x1F3C4, prExtendedPictographic}, // 6.0 [37] (🎠..🏄) carousel horse..person surfing - {0x1F3C5, 0x1F3C5, prExtendedPictographic}, // 7.0 [1] (🏅) sports medal - {0x1F3C6, 0x1F3CA, prExtendedPictographic}, // 6.0 [5] (🏆..🏊) trophy..person swimming - {0x1F3CB, 0x1F3CE, prExtendedPictographic}, // 7.0 [4] (🏋️..🏎️) person lifting weights..racing car - {0x1F3CF, 0x1F3D3, prExtendedPictographic}, // 8.0 [5] (🏏..🏓) cricket game..ping pong - {0x1F3D4, 0x1F3DF, prExtendedPictographic}, // 7.0 [12] (🏔️..🏟️) snow-capped mountain..stadium - {0x1F3E0, 0x1F3F0, prExtendedPictographic}, // 6.0 [17] (🏠..🏰) house..castle - {0x1F3F1, 0x1F3F7, prExtendedPictographic}, // 7.0 [7] (🏱..🏷️) WHITE PENNANT..label - {0x1F3F8, 0x1F3FA, prExtendedPictographic}, // 8.0 [3] (🏸..🏺) badminton..amphora - {0x1F3FB, 0x1F3FF, prExtend}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6 - {0x1F400, 0x1F43E, prExtendedPictographic}, // 6.0 [63] (🐀..🐾) rat..paw prints - {0x1F43F, 0x1F43F, prExtendedPictographic}, // 7.0 [1] (🐿️) chipmunk - {0x1F440, 0x1F440, prExtendedPictographic}, // 6.0 [1] (👀) eyes - {0x1F441, 0x1F441, prExtendedPictographic}, // 7.0 [1] (👁️) eye - {0x1F442, 0x1F4F7, prExtendedPictographic}, // 6.0[182] (👂..📷) ear..camera - {0x1F4F8, 0x1F4F8, prExtendedPictographic}, // 7.0 [1] (📸) camera with flash - {0x1F4F9, 0x1F4FC, prExtendedPictographic}, // 6.0 [4] (📹..📼) video camera..videocassette - {0x1F4FD, 0x1F4FE, prExtendedPictographic}, // 7.0 [2] (📽️..📾) film projector..PORTABLE STEREO - {0x1F4FF, 0x1F4FF, prExtendedPictographic}, // 8.0 [1] (📿) prayer beads - {0x1F500, 0x1F53D, prExtendedPictographic}, // 6.0 [62] (🔀..🔽) shuffle tracks button..downwards button - {0x1F546, 0x1F54A, prExtendedPictographic}, // 7.0 [5] (🕆..🕊️) WHITE LATIN CROSS..dove - {0x1F54B, 0x1F54F, prExtendedPictographic}, // 8.0 [5] (🕋..🕏) kaaba..BOWL OF HYGIEIA - {0x1F550, 0x1F567, prExtendedPictographic}, // 6.0 [24] (🕐..🕧) one o’clock..twelve-thirty - {0x1F568, 0x1F579, prExtendedPictographic}, // 7.0 [18] (🕨..🕹️) RIGHT SPEAKER..joystick - {0x1F57A, 0x1F57A, prExtendedPictographic}, // 9.0 [1] (🕺) man dancing - {0x1F57B, 0x1F5A3, prExtendedPictographic}, // 7.0 [41] (🕻..🖣) LEFT HAND TELEPHONE RECEIVER..BLACK DOWN POINTING BACKHAND INDEX - {0x1F5A4, 0x1F5A4, prExtendedPictographic}, // 9.0 [1] (🖤) black heart - {0x1F5A5, 0x1F5FA, prExtendedPictographic}, // 7.0 [86] (🖥️..🗺️) desktop computer..world map - {0x1F5FB, 0x1F5FF, prExtendedPictographic}, // 6.0 [5] (🗻..🗿) mount fuji..moai - {0x1F600, 0x1F600, prExtendedPictographic}, // 6.1 [1] (😀) grinning face - {0x1F601, 0x1F610, prExtendedPictographic}, // 6.0 [16] (😁..😐) beaming 
face with smiling eyes..neutral face - {0x1F611, 0x1F611, prExtendedPictographic}, // 6.1 [1] (😑) expressionless face - {0x1F612, 0x1F614, prExtendedPictographic}, // 6.0 [3] (😒..😔) unamused face..pensive face - {0x1F615, 0x1F615, prExtendedPictographic}, // 6.1 [1] (😕) confused face - {0x1F616, 0x1F616, prExtendedPictographic}, // 6.0 [1] (😖) confounded face - {0x1F617, 0x1F617, prExtendedPictographic}, // 6.1 [1] (😗) kissing face - {0x1F618, 0x1F618, prExtendedPictographic}, // 6.0 [1] (😘) face blowing a kiss - {0x1F619, 0x1F619, prExtendedPictographic}, // 6.1 [1] (😙) kissing face with smiling eyes - {0x1F61A, 0x1F61A, prExtendedPictographic}, // 6.0 [1] (😚) kissing face with closed eyes - {0x1F61B, 0x1F61B, prExtendedPictographic}, // 6.1 [1] (😛) face with tongue - {0x1F61C, 0x1F61E, prExtendedPictographic}, // 6.0 [3] (😜..😞) winking face with tongue..disappointed face - {0x1F61F, 0x1F61F, prExtendedPictographic}, // 6.1 [1] (😟) worried face - {0x1F620, 0x1F625, prExtendedPictographic}, // 6.0 [6] (😠..😥) angry face..sad but relieved face - {0x1F626, 0x1F627, prExtendedPictographic}, // 6.1 [2] (😦..😧) frowning face with open mouth..anguished face - {0x1F628, 0x1F62B, prExtendedPictographic}, // 6.0 [4] (😨..😫) fearful face..tired face - {0x1F62C, 0x1F62C, prExtendedPictographic}, // 6.1 [1] (😬) grimacing face - {0x1F62D, 0x1F62D, prExtendedPictographic}, // 6.0 [1] (😭) loudly crying face - {0x1F62E, 0x1F62F, prExtendedPictographic}, // 6.1 [2] (😮..😯) face with open mouth..hushed face - {0x1F630, 0x1F633, prExtendedPictographic}, // 6.0 [4] (😰..😳) anxious face with sweat..flushed face - {0x1F634, 0x1F634, prExtendedPictographic}, // 6.1 [1] (😴) sleeping face - {0x1F635, 0x1F640, prExtendedPictographic}, // 6.0 [12] (😵..🙀) dizzy face..weary cat - {0x1F641, 0x1F642, prExtendedPictographic}, // 7.0 [2] (🙁..🙂) slightly frowning face..slightly smiling face - {0x1F643, 0x1F644, prExtendedPictographic}, // 8.0 [2] (🙃..🙄) upside-down face..face with rolling eyes - {0x1F645, 0x1F64F, prExtendedPictographic}, // 6.0 [11] (🙅..🙏) person gesturing NO..folded hands - {0x1F680, 0x1F6C5, prExtendedPictographic}, // 6.0 [70] (🚀..🛅) rocket..left luggage - {0x1F6C6, 0x1F6CF, prExtendedPictographic}, // 7.0 [10] (🛆..🛏️) TRIANGLE WITH ROUNDED CORNERS..bed - {0x1F6D0, 0x1F6D0, prExtendedPictographic}, // 8.0 [1] (🛐) place of worship - {0x1F6D1, 0x1F6D2, prExtendedPictographic}, // 9.0 [2] (🛑..🛒) stop sign..shopping cart - {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // 10.0 [2] (🛓..🛔) STUPA..PAGODA - {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // 12.0 [1] (🛕) hindu temple - {0x1F6D6, 0x1F6DF, prExtendedPictographic}, // NA [10] (🛖..🛟) .. - {0x1F6E0, 0x1F6EC, prExtendedPictographic}, // 7.0 [13] (🛠️..🛬) hammer and wrench..airplane arrival - {0x1F6ED, 0x1F6EF, prExtendedPictographic}, // NA [3] (🛭..🛯) .. - {0x1F6F0, 0x1F6F3, prExtendedPictographic}, // 7.0 [4] (🛰️..🛳️) satellite..passenger ship - {0x1F6F4, 0x1F6F6, prExtendedPictographic}, // 9.0 [3] (🛴..🛶) kick scooter..canoe - {0x1F6F7, 0x1F6F8, prExtendedPictographic}, // 10.0 [2] (🛷..🛸) sled..flying saucer - {0x1F6F9, 0x1F6F9, prExtendedPictographic}, // 11.0 [1] (🛹) skateboard - {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // 12.0 [1] (🛺) auto rickshaw - {0x1F6FB, 0x1F6FF, prExtendedPictographic}, // NA [5] (🛻..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // NA [12] (🝴..🝿) .. - {0x1F7D5, 0x1F7D8, prExtendedPictographic}, // 11.0 [4] (🟕..🟘) CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE - {0x1F7D9, 0x1F7DF, prExtendedPictographic}, // NA [7] (🟙..🟟) .. 
- {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // 12.0 [12] (🟠..🟫) orange circle..brown square - {0x1F7EC, 0x1F7FF, prExtendedPictographic}, // NA [20] (🟬..🟿) .. - {0x1F80C, 0x1F80F, prExtendedPictographic}, // NA [4] (🠌..🠏) .. - {0x1F848, 0x1F84F, prExtendedPictographic}, // NA [8] (🡈..🡏) .. - {0x1F85A, 0x1F85F, prExtendedPictographic}, // NA [6] (🡚..🡟) .. - {0x1F888, 0x1F88F, prExtendedPictographic}, // NA [8] (🢈..🢏) .. - {0x1F8AE, 0x1F8FF, prExtendedPictographic}, // NA [82] (🢮..🣿) .. - {0x1F90C, 0x1F90C, prExtendedPictographic}, // NA [1] (🤌) - {0x1F90D, 0x1F90F, prExtendedPictographic}, // 12.0 [3] (🤍..🤏) white heart..pinching hand - {0x1F910, 0x1F918, prExtendedPictographic}, // 8.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns - {0x1F919, 0x1F91E, prExtendedPictographic}, // 9.0 [6] (🤙..🤞) call me hand..crossed fingers - {0x1F91F, 0x1F91F, prExtendedPictographic}, // 10.0 [1] (🤟) love-you gesture - {0x1F920, 0x1F927, prExtendedPictographic}, // 9.0 [8] (🤠..🤧) cowboy hat face..sneezing face - {0x1F928, 0x1F92F, prExtendedPictographic}, // 10.0 [8] (🤨..🤯) face with raised eyebrow..exploding head - {0x1F930, 0x1F930, prExtendedPictographic}, // 9.0 [1] (🤰) pregnant woman - {0x1F931, 0x1F932, prExtendedPictographic}, // 10.0 [2] (🤱..🤲) breast-feeding..palms up together - {0x1F933, 0x1F93A, prExtendedPictographic}, // 9.0 [8] (🤳..🤺) selfie..person fencing - {0x1F93C, 0x1F93E, prExtendedPictographic}, // 9.0 [3] (🤼..🤾) people wrestling..person playing handball - {0x1F93F, 0x1F93F, prExtendedPictographic}, // 12.0 [1] (🤿) diving mask - {0x1F940, 0x1F945, prExtendedPictographic}, // 9.0 [6] (🥀..🥅) wilted flower..goal net - {0x1F947, 0x1F94B, prExtendedPictographic}, // 9.0 [5] (🥇..🥋) 1st place medal..martial arts uniform - {0x1F94C, 0x1F94C, prExtendedPictographic}, // 10.0 [1] (🥌) curling stone - {0x1F94D, 0x1F94F, prExtendedPictographic}, // 11.0 [3] (🥍..🥏) lacrosse..flying disc - {0x1F950, 0x1F95E, prExtendedPictographic}, // 9.0 [15] (🥐..🥞) croissant..pancakes - {0x1F95F, 0x1F96B, prExtendedPictographic}, // 10.0 [13] (🥟..🥫) dumpling..canned food - {0x1F96C, 0x1F970, prExtendedPictographic}, // 11.0 [5] (🥬..🥰) leafy green..smiling face with hearts - {0x1F971, 0x1F971, prExtendedPictographic}, // 12.0 [1] (🥱) yawning face - {0x1F972, 0x1F972, prExtendedPictographic}, // NA [1] (🥲) - {0x1F973, 0x1F976, prExtendedPictographic}, // 11.0 [4] (🥳..🥶) partying face..cold face - {0x1F977, 0x1F979, prExtendedPictographic}, // NA [3] (🥷..🥹) .. - {0x1F97A, 0x1F97A, prExtendedPictographic}, // 11.0 [1] (🥺) pleading face - {0x1F97B, 0x1F97B, prExtendedPictographic}, // 12.0 [1] (🥻) sari - {0x1F97C, 0x1F97F, prExtendedPictographic}, // 11.0 [4] (🥼..🥿) lab coat..flat shoe - {0x1F980, 0x1F984, prExtendedPictographic}, // 8.0 [5] (🦀..🦄) crab..unicorn - {0x1F985, 0x1F991, prExtendedPictographic}, // 9.0 [13] (🦅..🦑) eagle..squid - {0x1F992, 0x1F997, prExtendedPictographic}, // 10.0 [6] (🦒..🦗) giraffe..cricket - {0x1F998, 0x1F9A2, prExtendedPictographic}, // 11.0 [11] (🦘..🦢) kangaroo..swan - {0x1F9A3, 0x1F9A4, prExtendedPictographic}, // NA [2] (🦣..🦤) .. - {0x1F9A5, 0x1F9AA, prExtendedPictographic}, // 12.0 [6] (🦥..🦪) sloth..oyster - {0x1F9AB, 0x1F9AD, prExtendedPictographic}, // NA [3] (🦫..🦭) .. 
- {0x1F9AE, 0x1F9AF, prExtendedPictographic}, // 12.0 [2] (🦮..🦯) guide dog..probing cane - {0x1F9B0, 0x1F9B9, prExtendedPictographic}, // 11.0 [10] (🦰..🦹) red hair..supervillain - {0x1F9BA, 0x1F9BF, prExtendedPictographic}, // 12.0 [6] (🦺..🦿) safety vest..mechanical leg - {0x1F9C0, 0x1F9C0, prExtendedPictographic}, // 8.0 [1] (🧀) cheese wedge - {0x1F9C1, 0x1F9C2, prExtendedPictographic}, // 11.0 [2] (🧁..🧂) cupcake..salt - {0x1F9C3, 0x1F9CA, prExtendedPictographic}, // 12.0 [8] (🧃..🧊) beverage box..ice cube - {0x1F9CB, 0x1F9CC, prExtendedPictographic}, // NA [2] (🧋..🧌) .. - {0x1F9CD, 0x1F9CF, prExtendedPictographic}, // 12.0 [3] (🧍..🧏) person standing..deaf person - {0x1F9D0, 0x1F9E6, prExtendedPictographic}, // 10.0 [23] (🧐..🧦) face with monocle..socks - {0x1F9E7, 0x1F9FF, prExtendedPictographic}, // 11.0 [25] (🧧..🧿) red envelope..nazar amulet - {0x1FA00, 0x1FA53, prExtendedPictographic}, // 12.0 [84] (🨀..🩓) NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP - {0x1FA54, 0x1FA5F, prExtendedPictographic}, // NA [12] (🩔..🩟) .. - {0x1FA60, 0x1FA6D, prExtendedPictographic}, // 11.0 [14] (🩠..🩭) XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER - {0x1FA6E, 0x1FA6F, prExtendedPictographic}, // NA [2] (🩮..🩯) .. - {0x1FA70, 0x1FA73, prExtendedPictographic}, // 12.0 [4] (🩰..🩳) ballet shoes..shorts - {0x1FA74, 0x1FA77, prExtendedPictographic}, // NA [4] (🩴..🩷) .. - {0x1FA78, 0x1FA7A, prExtendedPictographic}, // 12.0 [3] (🩸..🩺) drop of blood..stethoscope - {0x1FA7B, 0x1FA7F, prExtendedPictographic}, // NA [5] (🩻..🩿) .. - {0x1FA80, 0x1FA82, prExtendedPictographic}, // 12.0 [3] (🪀..🪂) yo-yo..parachute - {0x1FA83, 0x1FA8F, prExtendedPictographic}, // NA [13] (🪃..🪏) .. - {0x1FA90, 0x1FA95, prExtendedPictographic}, // 12.0 [6] (🪐..🪕) ringed planet..banjo - {0x1FA96, 0x1FFFD, prExtendedPictographic}, // NA[1384] (🪖..🿽) .. - {0xE0000, 0xE0000, prControl}, // Cn - {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG - {0xE0002, 0xE001F, prControl}, // Cn [30] .. - {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG - {0xE0080, 0xE00FF, prControl}, // Cn [128] .. - {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 - {0xE01F0, 0xE0FFF, prControl}, // Cn [3600] .. -} - -// property returns the Unicode property value (see constants above) of the -// given code point. -func property(r rune) int { - // Run a binary search. - from := 0 - to := len(codePoints) - for to > from { - middle := (from + to) / 2 - cpRange := codePoints[middle] - if int(r) < cpRange[0] { - to = middle - continue - } - if int(r) > cpRange[1] { - from = middle + 1 - continue - } - return cpRange[2] - } - return prAny -} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore deleted file mode 100644 index 8a43ce9d..00000000 --- a/vendor/github.com/shopspring/decimal/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.git -*.swp - -# IntelliJ -.idea/ -*.iml diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml deleted file mode 100644 index 55d42b28..00000000 --- a/vendor/github.com/shopspring/decimal/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.7.x - - 1.12.x - - 1.13.x - - tip - -install: - - go build . 
- -script: - - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md deleted file mode 100644 index 01ba02fe..00000000 --- a/vendor/github.com/shopspring/decimal/CHANGELOG.md +++ /dev/null @@ -1,19 +0,0 @@ -## Decimal v1.2.0 - -#### BREAKING -- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) - -#### FEATURES -- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) -- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) -- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) - -#### ENHANCEMENTS -- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) -- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) -- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) -- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) - -#### BUGFIXES -- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) -- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE deleted file mode 100644 index ad2148aa..00000000 --- a/vendor/github.com/shopspring/decimal/LICENSE +++ /dev/null @@ -1,45 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Spring, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -- Based on https://github.com/oguzbilgic/fpd, which has the following license: -""" -The MIT License (MIT) - -Copyright (c) 2013 Oguz Bilgic - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md deleted file mode 100644 index b70f9015..00000000 --- a/vendor/github.com/shopspring/decimal/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# decimal - -[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) - -Arbitrary-precision fixed-point decimal numbers in go. - -_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. - -## Features - - * The zero-value is 0, and is safe to use without initialization - * Addition, subtraction, multiplication with no loss of precision - * Division with specified precision - * Database/sql serialization/deserialization - * JSON and XML serialization/deserialization - -## Install - -Run `go get github.com/shopspring/decimal` - -## Requirements - -Decimal library requires Go version `>=1.7` - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/shopspring/decimal" -) - -func main() { - price, err := decimal.NewFromString("136.02") - if err != nil { - panic(err) - } - - quantity := decimal.NewFromInt(3) - - fee, _ := decimal.NewFromString(".035") - taxRate, _ := decimal.NewFromString(".08875") - - subtotal := price.Mul(quantity) - - preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) - - total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) - - fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 - fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 - fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 - fmt.Println("Total:", total) // Total: 459.824961375 - fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 -} -``` - -## Documentation - -http://godoc.org/github.com/shopspring/decimal - -## Production Usage - -* [Spring](https://shopspring.com/), since August 14, 2014. -* If you are using this in production, please let us know! - -## FAQ - -#### Why don't you just use float64? - -Because float64 (or any binary floating point type, actually) can't represent -numbers such as `0.1` exactly. - -Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that -it prints out `10`, but it actually prints `9.999999999999831`. Over time, -these small errors can really add up! - -#### Why don't you just use big.Rat? - -big.Rat is fine for representing rational numbers, but Decimal is better for -representing money. Why? Here's a (contrived) example: - -Let's say you use big.Rat, and you have two numbers, x and y, both -representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one -out, the string output has to stop somewhere (let's say it stops at 3 decimal -digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did -the other 0.001 go? - -Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE - -With Decimal, the strings being printed out represent the number exactly. 
So, -if you have `x = y = 1/3` (with precision 3), they will actually be equal to -0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is -unaccounted for! - -You still have to be careful. If you want to split a number `N` 3 ways, you -can't just send `N/3` to three different people. You have to pick one to send -`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. - -But, it is much easier to be careful with Decimal than with big.Rat. - -#### Why isn't the API similar to big.Int's? - -big.Int's API is built to reduce the number of memory allocations for maximal -performance. This makes sense for its use-case, but the trade-off is that the -API is awkward and easy to misuse. - -For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A -developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This -modifies `a` and sets `z` as an alias for `a`, which they might not expect. It -also modifies any other aliases to `a`. - -Here's an example of the subtle bugs you can introduce with big.Int's API: -https://play.golang.org/p/x2R_78pa8r - -In contrast, it's difficult to make such mistakes with decimal. Decimals -behave like other go numbers types: even though `a = b` will not deep copy -`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods -return new Decimals and do not modify the originals. The downside is that -this causes extra allocations, so Decimal is less performant. My assumption -is that if you're using Decimals, you probably care more about correctness -than performance. - -## License - -The MIT License (MIT) - -This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go deleted file mode 100644 index 9958d690..00000000 --- a/vendor/github.com/shopspring/decimal/decimal-go.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type decimal struct { - d [800]byte // digits, big-endian representation - nd int // number of digits used - dp int // decimal point - neg bool // negative flag - trunc bool // discarded nonzero digits beyond d[:nd] -} - -func (a *decimal) String() string { - n := 10 + a.nd - if a.dp > 0 { - n += a.dp - } - if a.dp < 0 { - n += -a.dp - } - - buf := make([]byte, n) - w := 0 - switch { - case a.nd == 0: - return "0" - - case a.dp <= 0: - // zeros fill space between decimal point and digits - buf[w] = '0' - w++ - buf[w] = '.' - w++ - w += digitZero(buf[w : w+-a.dp]) - w += copy(buf[w:], a.d[0:a.nd]) - - case a.dp < a.nd: - // decimal point in middle of digits - w += copy(buf[w:], a.d[0:a.dp]) - buf[w] = '.' 
- w++ - w += copy(buf[w:], a.d[a.dp:a.nd]) - - default: - // zeros fill space between digits and decimal point - w += copy(buf[w:], a.d[0:a.nd]) - w += digitZero(buf[w : w+a.dp-a.nd]) - } - return string(buf[0:w]) -} - -func digitZero(dst []byte) int { - for i := range dst { - dst[i] = '0' - } - return len(dst) -} - -// trim trailing zeros from number. -// (They are meaningless; the decimal point is tracked -// independent of the number of digits.) -func trim(a *decimal) { - for a.nd > 0 && a.d[a.nd-1] == '0' { - a.nd-- - } - if a.nd == 0 { - a.dp = 0 - } -} - -// Assign v to a. -func (a *decimal) Assign(v uint64) { - var buf [24]byte - - // Write reversed decimal in buf. - n := 0 - for v > 0 { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n++ - v = v1 - } - - // Reverse again to produce forward decimal in a.d. - a.nd = 0 - for n--; n >= 0; n-- { - a.d[a.nd] = buf[n] - a.nd++ - } - a.dp = a.nd - trim(a) -} - -// Maximum shift that we can do in one pass without overflow. -// A uint has 32 or 64 bits, and we have to be able to accommodate 9<<k. -const uintSize = 32 << (^uint(0) >> 63) -const maxShift = uintSize - 4 - -// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. -func rightShift(a *decimal, k uint) { - r := 0 // read pointer - w := 0 // write pointer - - // Pick up enough leading digits to cover first shift. - var n uint - for ; n>>k == 0; r++ { - if r >= a.nd { - if n == 0 { - // a == 0; shouldn't get here, but handle anyway. - a.nd = 0 - return - } - for n>>k == 0 { - n = n * 10 - r++ - } - break - } - c := uint(a.d[r]) - n = n*10 + c - '0' - } - a.dp -= r - 1 - - var mask uint = (1 << k) - 1 - - // Pick up a digit, put down a digit. - for ; r < a.nd; r++ { - c := uint(a.d[r]) - dig := n >> k - n &= mask - a.d[w] = byte(dig + '0') - w++ - n = n*10 + c - '0' - } - - // Put down extra digits. - for n > 0 { - dig := n >> k - n &= mask - if w < len(a.d) { - a.d[w] = byte(dig + '0') - w++ - } else if dig > 0 { - a.trunc = true - } - n = n * 10 - } - - a.nd = w - trim(a) -} - -// Cheat sheet for left shift: table indexed by shift count giving -// number of new digits that will be introduced by that shift. -// -// For example, leftcheats[4] = {2, "625"}. That means that -// if we are shifting by 4 (multiplying by 16), it will add 2 digits -// when the string prefix is "625" through "999", and one fewer digit -// if the string prefix is "000" through "624". -// -// Credit for this trick goes to Ken. - -type leftCheat struct { - delta int // number of new digits - cutoff string // minus one digit if original < a. -} - -var leftcheats = []leftCheat{ - // Leading digits of 1/2^i = 5^i. - // 5^23 is not an exact 64-bit floating point number, - // so have to use bc for the math. - // Go up to 60 to be large enough for 32bit and 64bit platforms.
- /* - seq 60 | sed 's/^/5^/' | bc | - awk 'BEGIN{ print "\t{ 0, \"\" }," } - { - log2 = log(2)/log(10) - printf("\t{ %d, \"%s\" },\t// * %d\n", - int(log2*NR+1), $0, 2**NR) - }' - */ - {0, ""}, - {1, "5"}, // * 2 - {1, "25"}, // * 4 - {1, "125"}, // * 8 - {2, "625"}, // * 16 - {2, "3125"}, // * 32 - {2, "15625"}, // * 64 - {3, "78125"}, // * 128 - {3, "390625"}, // * 256 - {3, "1953125"}, // * 512 - {4, "9765625"}, // * 1024 - {4, "48828125"}, // * 2048 - {4, "244140625"}, // * 4096 - {4, "1220703125"}, // * 8192 - {5, "6103515625"}, // * 16384 - {5, "30517578125"}, // * 32768 - {5, "152587890625"}, // * 65536 - {6, "762939453125"}, // * 131072 - {6, "3814697265625"}, // * 262144 - {6, "19073486328125"}, // * 524288 - {7, "95367431640625"}, // * 1048576 - {7, "476837158203125"}, // * 2097152 - {7, "2384185791015625"}, // * 4194304 - {7, "11920928955078125"}, // * 8388608 - {8, "59604644775390625"}, // * 16777216 - {8, "298023223876953125"}, // * 33554432 - {8, "1490116119384765625"}, // * 67108864 - {9, "7450580596923828125"}, // * 134217728 - {9, "37252902984619140625"}, // * 268435456 - {9, "186264514923095703125"}, // * 536870912 - {10, "931322574615478515625"}, // * 1073741824 - {10, "4656612873077392578125"}, // * 2147483648 - {10, "23283064365386962890625"}, // * 4294967296 - {10, "116415321826934814453125"}, // * 8589934592 - {11, "582076609134674072265625"}, // * 17179869184 - {11, "2910383045673370361328125"}, // * 34359738368 - {11, "14551915228366851806640625"}, // * 68719476736 - {12, "72759576141834259033203125"}, // * 137438953472 - {12, "363797880709171295166015625"}, // * 274877906944 - {12, "1818989403545856475830078125"}, // * 549755813888 - {13, "9094947017729282379150390625"}, // * 1099511627776 - {13, "45474735088646411895751953125"}, // * 2199023255552 - {13, "227373675443232059478759765625"}, // * 4398046511104 - {13, "1136868377216160297393798828125"}, // * 8796093022208 - {14, "5684341886080801486968994140625"}, // * 17592186044416 - {14, "28421709430404007434844970703125"}, // * 35184372088832 - {14, "142108547152020037174224853515625"}, // * 70368744177664 - {15, "710542735760100185871124267578125"}, // * 140737488355328 - {15, "3552713678800500929355621337890625"}, // * 281474976710656 - {15, "17763568394002504646778106689453125"}, // * 562949953421312 - {16, "88817841970012523233890533447265625"}, // * 1125899906842624 - {16, "444089209850062616169452667236328125"}, // * 2251799813685248 - {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 - {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 - {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 - {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 - {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 - {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 - {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 - {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 - {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 -} - -// Is the leading prefix of b lexicographically less than s? -func prefixIsLessThan(b []byte, s string) bool { - for i := 0; i < len(s); i++ { - if i >= len(b) { - return true - } - if b[i] != s[i] { - return b[i] < s[i] - } - } - return false -} - -// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. 
-func leftShift(a *decimal, k uint) { - delta := leftcheats[k].delta - if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { - delta-- - } - - r := a.nd // read index - w := a.nd + delta // write index - - // Pick up a digit, put down a digit. - var n uint - for r--; r >= 0; r-- { - n += (uint(a.d[r]) - '0') << k - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - // Put down extra digits. - for n > 0 { - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - a.nd += delta - if a.nd >= len(a.d) { - a.nd = len(a.d) - } - a.dp += delta - trim(a) -} - -// Binary shift left (k > 0) or right (k < 0). -func (a *decimal) Shift(k int) { - switch { - case a.nd == 0: - // nothing to do: a == 0 - case k > 0: - for k > maxShift { - leftShift(a, maxShift) - k -= maxShift - } - leftShift(a, uint(k)) - case k < 0: - for k < -maxShift { - rightShift(a, maxShift) - k += maxShift - } - rightShift(a, uint(-k)) - } -} - -// If we chop a at nd digits, should we round up? -func shouldRoundUp(a *decimal, nd int) bool { - if nd < 0 || nd >= a.nd { - return false - } - if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even - // if we truncated, a little higher than what's recorded - always round up - if a.trunc { - return true - } - return nd > 0 && (a.d[nd-1]-'0')%2 != 0 - } - // not halfway - digit tells all - return a.d[nd] >= '5' -} - -// Round a to nd digits (or fewer). -// If nd is zero, it means we're rounding -// just to the left of the digits, as in -// 0.09 -> 0.1. -func (a *decimal) Round(nd int) { - if nd < 0 || nd >= a.nd { - return - } - if shouldRoundUp(a, nd) { - a.RoundUp(nd) - } else { - a.RoundDown(nd) - } -} - -// Round a down to nd digits (or fewer). -func (a *decimal) RoundDown(nd int) { - if nd < 0 || nd >= a.nd { - return - } - a.nd = nd - trim(a) -} - -// Round a up to nd digits (or fewer). -func (a *decimal) RoundUp(nd int) { - if nd < 0 || nd >= a.nd { - return - } - - // round up - for i := nd - 1; i >= 0; i-- { - c := a.d[i] - if c < '9' { // can stop after this digit - a.d[i]++ - a.nd = i + 1 - return - } - } - - // Number is all 9s. - // Change to single 1 with adjusted decimal point. - a.d[0] = '1' - a.nd = 1 - a.dp++ -} - -// Extract integer part, rounded appropriately. -// No guarantees about overflow. -func (a *decimal) RoundedInteger() uint64 { - if a.dp > 20 { - return 0xFFFFFFFFFFFFFFFF - } - var i int - n := uint64(0) - for i = 0; i < a.dp && i < a.nd; i++ { - n = n*10 + uint64(a.d[i]-'0') - } - for ; i < a.dp; i++ { - n *= 10 - } - if shouldRoundUp(a, a.dp) { - n++ - } - return n -} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go deleted file mode 100644 index 801c1a04..00000000 --- a/vendor/github.com/shopspring/decimal/decimal.go +++ /dev/null @@ -1,1477 +0,0 @@ -// Package decimal implements an arbitrary precision fixed-point decimal. -// -// The zero-value of a Decimal is 0, as you would expect. -// -// The best way to create a new Decimal is to use decimal.NewFromString, ex: -// -// n, err := decimal.NewFromString("-123.4567") -// n.String() // output: "-123.4567" -// -// To use Decimal as part of a struct: -// -// type Struct struct { -// Number Decimal -// } -// -// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
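A hedged sketch of the struct usage mentioned in the doc comment above (the `Invoice` type and its values are made up for illustration), relying only on `RequireFromString`, `Equal` and the default quoted JSON marshaling defined later in this file:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/shopspring/decimal"
)

// Invoice is a hypothetical struct with a Decimal field.
type Invoice struct {
	Total decimal.Decimal `json:"total"`
}

func main() {
	in := Invoice{Total: decimal.RequireFromString("19.99")}

	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // {"total":"19.99"}  (quoted, because MarshalJSONWithoutQuotes defaults to false)

	var out Invoice
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Total.Equal(in.Total)) // true
}
```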
-package decimal - -import ( - "database/sql/driver" - "encoding/binary" - "fmt" - "math" - "math/big" - "strconv" - "strings" -) - -// DivisionPrecision is the number of decimal places in the result when it -// doesn't divide exactly. -// -// Example: -// -// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d1.String() // output: "0.6666666666666667" -// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -// d2.String() // output: "0.0000666666666667" -// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -// d3.String() // output: "6666.6666666666666667" -// decimal.DivisionPrecision = 3 -// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d4.String() // output: "0.667" -// -var DivisionPrecision = 16 - -// MarshalJSONWithoutQuotes should be set to true if you want the decimal to -// be JSON marshaled as a number, instead of as a string. -// WARNING: this is dangerous for decimals with many digits, since many JSON -// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 -// double-precision floating point numbers, which means you can potentially -// silently lose precision. -var MarshalJSONWithoutQuotes = false - -// Zero constant, to make computations faster. -// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. -var Zero = New(0, 1) - -var zeroInt = big.NewInt(0) -var oneInt = big.NewInt(1) -var twoInt = big.NewInt(2) -var fourInt = big.NewInt(4) -var fiveInt = big.NewInt(5) -var tenInt = big.NewInt(10) -var twentyInt = big.NewInt(20) - -// Decimal represents a fixed-point decimal. It is immutable. -// number = value * 10 ^ exp -type Decimal struct { - value *big.Int - - // NOTE(vadim): this must be an int32, because we cast it to float64 during - // calculations. If exp is 64 bit, we might lose precision. - // If we cared about being able to represent every possible decimal, we - // could make exp a *big.Int but it would hurt performance and numbers - // like that are unrealistic. - exp int32 -} - -// New returns a new fixed-point decimal, value * 10 ^ exp. -func New(value int64, exp int32) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: exp, - } -} - -// NewFromInt converts a int64 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt(value int64) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: 0, - } -} - -// NewFromInt32 converts a int32 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt32(value int32) Decimal { - return Decimal{ - value: big.NewInt(int64(value)), - exp: 0, - } -} - -// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp -func NewFromBigInt(value *big.Int, exp int32) Decimal { - return Decimal{ - value: big.NewInt(0).Set(value), - exp: exp, - } -} - -// NewFromString returns a new Decimal from a string representation. -// Trailing zeroes are not trimmed. 
-// -// Example: -// -// d, err := NewFromString("-123.45") -// d2, err := NewFromString(".0001") -// d3, err := NewFromString("1.47000") -// -func NewFromString(value string) (Decimal, error) { - originalInput := value - var intString string - var exp int64 - - // Check if number is using scientific notation - eIndex := strings.IndexAny(value, "Ee") - if eIndex != -1 { - expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) - } - return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) - } - value = value[:eIndex] - exp = expInt - } - - parts := strings.Split(value, ".") - if len(parts) == 1 { - // There is no decimal point, we can just parse the original string as - // an int - intString = value - } else if len(parts) == 2 { - intString = parts[0] + parts[1] - expInt := -len(parts[1]) - exp += int64(expInt) - } else { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) - } - - dValue := new(big.Int) - _, ok := dValue.SetString(intString, 10) - if !ok { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) - } - - if exp < math.MinInt32 || exp > math.MaxInt32 { - // NOTE(vadim): I doubt a string could realistically be this long - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) - } - - return Decimal{ - value: dValue, - exp: int32(exp), - }, nil -} - -// RequireFromString returns a new Decimal from a string representation -// or panics if NewFromString would have returned an error. -// -// Example: -// -// d := RequireFromString("-123.45") -// d2 := RequireFromString(".0001") -// -func RequireFromString(value string) Decimal { - dec, err := NewFromString(value) - if err != nil { - panic(err) - } - return dec -} - -// NewFromFloat converts a float64 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 15 digits, but may be more in some cases. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -// -// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. -// -// NOTE: this will panic on NaN, +/-inf -func NewFromFloat(value float64) Decimal { - if value == 0 { - return New(0, 0) - } - return newFromFloat(value, math.Float64bits(value), &float64info) -} - -// NewFromFloat32 converts a float32 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 6-8 digits depending on the input. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -// -// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. 
-// -// NOTE: this will panic on NaN, +/-inf -func NewFromFloat32(value float32) Decimal { - if value == 0 { - return New(0, 0) - } - // XOR is workaround for https://github.com/golang/go/issues/26285 - a := math.Float32bits(value) ^ 0x80808080 - return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) -} - -func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { - if math.IsNaN(val) || math.IsInf(val, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", val)) - } - exp := int(bits>>flt.mantbits) & (1<>(flt.expbits+flt.mantbits) != 0 - - roundShortest(&d, mant, exp, flt) - // If less than 19 digits, we can do calculation in an int64. - if d.nd < 19 { - tmp := int64(0) - m := int64(1) - for i := d.nd - 1; i >= 0; i-- { - tmp += m * int64(d.d[i]-'0') - m *= 10 - } - if d.neg { - tmp *= -1 - } - return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} - } - dValue := new(big.Int) - dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) - if ok { - return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} - } - - return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) -} - -// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary -// number of fractional digits. -// -// Example: -// -// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" -// -func NewFromFloatWithExponent(value float64, exp int32) Decimal { - if math.IsNaN(value) || math.IsInf(value, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) - } - - bits := math.Float64bits(value) - mant := bits & (1<<52 - 1) - exp2 := int32((bits >> 52) & (1<<11 - 1)) - sign := bits >> 63 - - if exp2 == 0 { - // specials - if mant == 0 { - return Decimal{} - } - // subnormal - exp2++ - } else { - // normal - mant |= 1 << 52 - } - - exp2 -= 1023 + 52 - - // normalizing base-2 values - for mant&1 == 0 { - mant = mant >> 1 - exp2++ - } - - // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 - if exp < 0 && exp < exp2 { - if exp2 < 0 { - exp = exp2 - } else { - exp = 0 - } - } - - // representing 10^M * 2^N as 5^M * 2^(M+N) - exp2 -= exp - - temp := big.NewInt(1) - dMant := big.NewInt(int64(mant)) - - // applying 5^M - if exp > 0 { - temp = temp.SetInt64(int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - } else if exp < 0 { - temp = temp.SetInt64(-int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - dMant = dMant.Mul(dMant, temp) - temp = temp.SetUint64(1) - } - - // applying 2^(M+N) - if exp2 > 0 { - dMant = dMant.Lsh(dMant, uint(exp2)) - } else if exp2 < 0 { - temp = temp.Lsh(temp, uint(-exp2)) - } - - // rounding and downscaling - if exp > 0 || exp2 < 0 { - halfDown := new(big.Int).Rsh(temp, 1) - dMant = dMant.Add(dMant, halfDown) - dMant = dMant.Quo(dMant, temp) - } - - if sign == 1 { - dMant = dMant.Neg(dMant) - } - - return Decimal{ - value: dMant, - exp: exp, - } -} - -// rescale returns a rescaled version of the decimal. Returned -// decimal may be less precise if the given exponent is bigger -// than the initial exponent of the Decimal. 
-// NOTE: this will truncate, NOT round -// -// Example: -// -// d := New(12345, -4) -// d2 := d.rescale(-1) -// d3 := d2.rescale(-4) -// println(d1) -// println(d2) -// println(d3) -// -// Output: -// -// 1.2345 -// 1.2 -// 1.2000 -// -func (d Decimal) rescale(exp int32) Decimal { - d.ensureInitialized() - - if d.exp == exp { - return Decimal{ - new(big.Int).Set(d.value), - d.exp, - } - } - - // NOTE(vadim): must convert exps to float64 before - to prevent overflow - diff := math.Abs(float64(exp) - float64(d.exp)) - value := new(big.Int).Set(d.value) - - expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) - if exp > d.exp { - value = value.Quo(value, expScale) - } else if exp < d.exp { - value = value.Mul(value, expScale) - } - - return Decimal{ - value: value, - exp: exp, - } -} - -// Abs returns the absolute value of the decimal. -func (d Decimal) Abs() Decimal { - d.ensureInitialized() - d2Value := new(big.Int).Abs(d.value) - return Decimal{ - value: d2Value, - exp: d.exp, - } -} - -// Add returns d + d2. -func (d Decimal) Add(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Add(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Sub returns d - d2. -func (d Decimal) Sub(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Sub(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Neg returns -d. -func (d Decimal) Neg() Decimal { - d.ensureInitialized() - val := new(big.Int).Neg(d.value) - return Decimal{ - value: val, - exp: d.exp, - } -} - -// Mul returns d * d2. -func (d Decimal) Mul(d2 Decimal) Decimal { - d.ensureInitialized() - d2.ensureInitialized() - - expInt64 := int64(d.exp) + int64(d2.exp) - if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { - // NOTE(vadim): better to panic than give incorrect results, as - // Decimals are usually used for money - panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64)) - } - - d3Value := new(big.Int).Mul(d.value, d2.value) - return Decimal{ - value: d3Value, - exp: int32(expInt64), - } -} - -// Shift shifts the decimal in base 10. -// It shifts left when shift is positive and right if shift is negative. -// In simpler terms, the given value for shift is added to the exponent -// of the decimal. -func (d Decimal) Shift(shift int32) Decimal { - d.ensureInitialized() - return Decimal{ - value: new(big.Int).Set(d.value), - exp: d.exp + shift, - } -} - -// Div returns d / d2. If it doesn't divide exactly, the result will have -// DivisionPrecision digits after the decimal point. -func (d Decimal) Div(d2 Decimal) Decimal { - return d.DivRound(d2, int32(DivisionPrecision)) -} - -// QuoRem does divsion with remainder -// d.QuoRem(d2,precision) returns quotient q and remainder r such that -// d = d2 * q + r, q an integer multiple of 10^(-precision) -// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 -// Note that precision<0 is allowed as input. 
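A small worked example of the contract just stated, with illustrative numbers only: the quotient is an integer multiple of 10^(-precision) and the identity d = d2*q + r holds exactly.

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	d := decimal.New(10, 0) // 10
	d2 := decimal.New(3, 0) // 3

	// q is an integer multiple of 10^(-1) and d = d2*q + r holds exactly:
	// 10 = 3*3.3 + 0.1, with 0 <= 0.1 < 3*0.1.
	q, r := d.QuoRem(d2, 1)
	fmt.Println(q, r) // 3.3 0.1

	// DivRound rounds the final digit instead of truncating.
	fmt.Println(d.DivRound(d2, 1)) // 3.3
}
```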
-func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { - d.ensureInitialized() - d2.ensureInitialized() - if d2.value.Sign() == 0 { - panic("decimal division by 0") - } - scale := -precision - e := int64(d.exp - d2.exp - scale) - if e > math.MaxInt32 || e < math.MinInt32 { - panic("overflow in decimal QuoRem") - } - var aa, bb, expo big.Int - var scalerest int32 - // d = a 10^ea - // d2 = b 10^eb - if e < 0 { - aa = *d.value - expo.SetInt64(-e) - bb.Exp(tenInt, &expo, nil) - bb.Mul(d2.value, &bb) - scalerest = d.exp - // now aa = a - // bb = b 10^(scale + eb - ea) - } else { - expo.SetInt64(e) - aa.Exp(tenInt, &expo, nil) - aa.Mul(d.value, &aa) - bb = *d2.value - scalerest = scale + d2.exp - // now aa = a ^ (ea - eb - scale) - // bb = b - } - var q, r big.Int - q.QuoRem(&aa, &bb, &r) - dq := Decimal{value: &q, exp: scale} - dr := Decimal{value: &r, exp: scalerest} - return dq, dr -} - -// DivRound divides and rounds to a given precision -// i.e. to an integer multiple of 10^(-precision) -// for a positive quotient digit 5 is rounded up, away from 0 -// if the quotient is negative then digit 5 is rounded down, away from 0 -// Note that precision<0 is allowed as input. -func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { - // QuoRem already checks initialization - q, r := d.QuoRem(d2, precision) - // the actual rounding decision is based on comparing r*10^precision and d2/2 - // instead compare 2 r 10 ^precision and d2 - var rv2 big.Int - rv2.Abs(r.value) - rv2.Lsh(&rv2, 1) - // now rv2 = abs(r.value) * 2 - r2 := Decimal{value: &rv2, exp: r.exp + precision} - // r2 is now 2 * r * 10 ^ precision - var c = r2.Cmp(d2.Abs()) - - if c < 0 { - return q - } - - if d.value.Sign()*d2.value.Sign() < 0 { - return q.Sub(New(1, -precision)) - } - - return q.Add(New(1, -precision)) -} - -// Mod returns d % d2. -func (d Decimal) Mod(d2 Decimal) Decimal { - quo := d.Div(d2).Truncate(0) - return d.Sub(d2.Mul(quo)) -} - -// Pow returns d to the power d2 -func (d Decimal) Pow(d2 Decimal) Decimal { - var temp Decimal - if d2.IntPart() == 0 { - return NewFromFloat(1) - } - temp = d.Pow(d2.Div(NewFromFloat(2))) - if d2.IntPart()%2 == 0 { - return temp.Mul(temp) - } - if d2.IntPart() > 0 { - return temp.Mul(temp).Mul(d) - } - return temp.Mul(temp).Div(d) -} - -// Cmp compares the numbers represented by d and d2 and returns: -// -// -1 if d < d2 -// 0 if d == d2 -// +1 if d > d2 -// -func (d Decimal) Cmp(d2 Decimal) int { - d.ensureInitialized() - d2.ensureInitialized() - - if d.exp == d2.exp { - return d.value.Cmp(d2.value) - } - - rd, rd2 := RescalePair(d, d2) - - return rd.value.Cmp(rd2.value) -} - -// Equal returns whether the numbers represented by d and d2 are equal. -func (d Decimal) Equal(d2 Decimal) bool { - return d.Cmp(d2) == 0 -} - -// Equals is deprecated, please use Equal method instead -func (d Decimal) Equals(d2 Decimal) bool { - return d.Equal(d2) -} - -// GreaterThan (GT) returns true when d is greater than d2. -func (d Decimal) GreaterThan(d2 Decimal) bool { - return d.Cmp(d2) == 1 -} - -// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. -func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == 1 || cmp == 0 -} - -// LessThan (LT) returns true when d is less than d2. -func (d Decimal) LessThan(d2 Decimal) bool { - return d.Cmp(d2) == -1 -} - -// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. 
-func (d Decimal) LessThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == -1 || cmp == 0 -} - -// Sign returns: -// -// -1 if d < 0 -// 0 if d == 0 -// +1 if d > 0 -// -func (d Decimal) Sign() int { - if d.value == nil { - return 0 - } - return d.value.Sign() -} - -// IsPositive return -// -// true if d > 0 -// false if d == 0 -// false if d < 0 -func (d Decimal) IsPositive() bool { - return d.Sign() == 1 -} - -// IsNegative return -// -// true if d < 0 -// false if d == 0 -// false if d > 0 -func (d Decimal) IsNegative() bool { - return d.Sign() == -1 -} - -// IsZero return -// -// true if d == 0 -// false if d > 0 -// false if d < 0 -func (d Decimal) IsZero() bool { - return d.Sign() == 0 -} - -// Exponent returns the exponent, or scale component of the decimal. -func (d Decimal) Exponent() int32 { - return d.exp -} - -// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() -func (d Decimal) Coefficient() *big.Int { - d.ensureInitialized() - // we copy the coefficient so that mutating the result does not mutate the - // Decimal. - return big.NewInt(0).Set(d.value) -} - -// IntPart returns the integer component of the decimal. -func (d Decimal) IntPart() int64 { - scaledD := d.rescale(0) - return scaledD.value.Int64() -} - -// BigInt returns integer component of the decimal as a BigInt. -func (d Decimal) BigInt() *big.Int { - scaledD := d.rescale(0) - i := &big.Int{} - i.SetString(scaledD.String(), 10) - return i -} - -// BigFloat returns decimal as BigFloat. -// Be aware that casting decimal to BigFloat might cause a loss of precision. -func (d Decimal) BigFloat() *big.Float { - f := &big.Float{} - f.SetString(d.String()) - return f -} - -// Rat returns a rational number representation of the decimal. -func (d Decimal) Rat() *big.Rat { - d.ensureInitialized() - if d.exp <= 0 { - // NOTE(vadim): must negate after casting to prevent int32 overflow - denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) - return new(big.Rat).SetFrac(d.value, denom) - } - - mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) - num := new(big.Int).Mul(d.value, mul) - return new(big.Rat).SetFrac(num, oneInt) -} - -// Float64 returns the nearest float64 value for d and a bool indicating -// whether f represents d exactly. -// For more details, see the documentation for big.Rat.Float64 -func (d Decimal) Float64() (f float64, exact bool) { - return d.Rat().Float64() -} - -// String returns the string representation of the decimal -// with the fixed point. -// -// Example: -// -// d := New(-12345, -3) -// println(d.String()) -// -// Output: -// -// -12.345 -// -func (d Decimal) String() string { - return d.string(true) -} - -// StringFixed returns a rounded fixed-point string with places digits after -// the decimal point. -// -// Example: -// -// NewFromFloat(0).StringFixed(2) // output: "0.00" -// NewFromFloat(0).StringFixed(0) // output: "0" -// NewFromFloat(5.45).StringFixed(0) // output: "5" -// NewFromFloat(5.45).StringFixed(1) // output: "5.5" -// NewFromFloat(5.45).StringFixed(2) // output: "5.45" -// NewFromFloat(5.45).StringFixed(3) // output: "5.450" -// NewFromFloat(545).StringFixed(-1) // output: "550" -// -func (d Decimal) StringFixed(places int32) string { - rounded := d.Round(places) - return rounded.string(false) -} - -// StringFixedBank returns a banker rounded fixed-point string with places digits -// after the decimal point. 
-// -// Example: -// -// NewFromFloat(0).StringFixedBank(2) // output: "0.00" -// NewFromFloat(0).StringFixedBank(0) // output: "0" -// NewFromFloat(5.45).StringFixedBank(0) // output: "5" -// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" -// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" -// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" -// NewFromFloat(545).StringFixedBank(-1) // output: "540" -// -func (d Decimal) StringFixedBank(places int32) string { - rounded := d.RoundBank(places) - return rounded.string(false) -} - -// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For -// more details see the documentation at function RoundCash. -func (d Decimal) StringFixedCash(interval uint8) string { - rounded := d.RoundCash(interval) - return rounded.string(false) -} - -// Round rounds the decimal to places decimal places. -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Example: -// -// NewFromFloat(5.45).Round(1).String() // output: "5.5" -// NewFromFloat(545).Round(-1).String() // output: "550" -// -func (d Decimal) Round(places int32) Decimal { - // truncate to places + 1 - ret := d.rescale(-places - 1) - - // add sign(d) * 0.5 - if ret.value.Sign() < 0 { - ret.value.Sub(ret.value, fiveInt) - } else { - ret.value.Add(ret.value, fiveInt) - } - - // floor for positive numbers, ceil for negative numbers - _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) - ret.exp++ - if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { - ret.value.Add(ret.value, oneInt) - } - - return ret -} - -// RoundBank rounds the decimal to places decimal places. -// If the final digit to round is equidistant from the nearest two integers the -// rounded value is taken as the even number -// -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Examples: -// -// NewFromFloat(5.45).Round(1).String() // output: "5.4" -// NewFromFloat(545).Round(-1).String() // output: "540" -// NewFromFloat(5.46).Round(1).String() // output: "5.5" -// NewFromFloat(546).Round(-1).String() // output: "550" -// NewFromFloat(5.55).Round(1).String() // output: "5.6" -// NewFromFloat(555).Round(-1).String() // output: "560" -// -func (d Decimal) RoundBank(places int32) Decimal { - - round := d.Round(places) - remainder := d.Sub(round).Abs() - - half := New(5, -places-1) - if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { - if round.value.Sign() < 0 { - round.value.Add(round.value, oneInt) - } else { - round.value.Sub(round.value, oneInt) - } - } - - return round -} - -// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific -// interval. The amount payable for a cash transaction is rounded to the nearest -// multiple of the minimum currency unit available. The following intervals are -// available: 5, 10, 25, 50 and 100; any other number throws a panic. -// 5: 5 cent rounding 3.43 => 3.45 -// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -// 25: 25 cent rounding 3.41 => 3.50 -// 50: 50 cent rounding 3.75 => 4.00 -// 100: 100 cent rounding 3.50 => 4.00 -// For more details: https://en.wikipedia.org/wiki/Cash_rounding -func (d Decimal) RoundCash(interval uint8) Decimal { - var iVal *big.Int - switch interval { - case 5: - iVal = twentyInt - case 10: - iVal = tenInt - case 25: - iVal = fourInt - case 50: - iVal = twoInt - case 100: - iVal = oneInt - default: - panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. 
Supported: 5, 10, 25, 50, 100", interval)) - } - dVal := Decimal{ - value: iVal, - } - - // TODO: optimize those calculations to reduce the high allocations (~29 allocs). - return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) -} - -// Floor returns the nearest integer value less than or equal to d. -func (d Decimal) Floor() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z := new(big.Int).Div(d.value, exp) - return Decimal{value: z, exp: 0} -} - -// Ceil returns the nearest integer value greater than or equal to d. -func (d Decimal) Ceil() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) - if m.Cmp(zeroInt) != 0 { - z.Add(z, oneInt) - } - return Decimal{value: z, exp: 0} -} - -// Truncate truncates off digits from the number, without rounding. -// -// NOTE: precision is the last digit that will not be truncated (must be >= 0). -// -// Example: -// -// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" -// -func (d Decimal) Truncate(precision int32) Decimal { - d.ensureInitialized() - if precision >= 0 && -precision > d.exp { - return d.rescale(-precision) - } - return d -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - return nil - } - - str, err := unquoteIfQuoted(decimalBytes) - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) - } - - decimal, err := NewFromString(str) - *d = decimal - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Decimal) MarshalJSON() ([]byte, error) { - var str string - if MarshalJSONWithoutQuotes { - str = d.String() - } else { - str = "\"" + d.String() + "\"" - } - return []byte(str), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation -// is already used when encoding to text, this method stores that string as []byte -func (d *Decimal) UnmarshalBinary(data []byte) error { - // Extract the exponent - d.exp = int32(binary.BigEndian.Uint32(data[:4])) - - // Extract the value - d.value = new(big.Int) - return d.value.GobDecode(data[4:]) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Decimal) MarshalBinary() (data []byte, err error) { - // Write the exponent first since it's a fixed size - v1 := make([]byte, 4) - binary.BigEndian.PutUint32(v1, uint32(d.exp)) - - // Add the value - var v2 []byte - if v2, err = d.value.GobEncode(); err != nil { - return - } - - // Return the byte array - data = append(v1, v2...) - return -} - -// Scan implements the sql.Scanner interface for database deserialization. 
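A minimal sketch that exercises `Scan` directly, with no real database driver involved; `NullDecimal` is the NULL-aware wrapper defined later in this file:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	// database/sql hands Scan whatever the driver produced; strings,
	// float64 and int64 are all handled by the implementation that follows.
	var price decimal.Decimal
	if err := price.Scan("123.45"); err != nil {
		panic(err)
	}
	fmt.Println(price) // 123.45

	// NullDecimal distinguishes NULL from zero.
	var discount decimal.NullDecimal
	if err := discount.Scan(nil); err != nil {
		panic(err)
	}
	fmt.Println(discount.Valid) // false
}
```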
-func (d *Decimal) Scan(value interface{}) error { - // first try to see if the data is stored in database as a Numeric datatype - switch v := value.(type) { - - case float32: - *d = NewFromFloat(float64(v)) - return nil - - case float64: - // numeric in sqlite3 sends us float64 - *d = NewFromFloat(v) - return nil - - case int64: - // at least in sqlite3 when the value is 0 in db, the data is sent - // to us as an int64 instead of a float64 ... - *d = New(v, 0) - return nil - - default: - // default is trying to interpret value stored as string - str, err := unquoteIfQuoted(v) - if err != nil { - return err - } - *d, err = NewFromString(str) - return err - } -} - -// Value implements the driver.Valuer interface for database serialization. -func (d Decimal) Value() (driver.Value, error) { - return d.String(), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for XML -// deserialization. -func (d *Decimal) UnmarshalText(text []byte) error { - str := string(text) - - dec, err := NewFromString(str) - *d = dec - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface for XML -// serialization. -func (d Decimal) MarshalText() (text []byte, err error) { - return []byte(d.String()), nil -} - -// GobEncode implements the gob.GobEncoder interface for gob serialization. -func (d Decimal) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface for gob serialization. -func (d *Decimal) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// StringScaled first scales the decimal then calls .String() on it. -// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. -func (d Decimal) StringScaled(exp int32) string { - return d.rescale(exp).String() -} - -func (d Decimal) string(trimTrailingZeros bool) string { - if d.exp >= 0 { - return d.rescale(0).value.String() - } - - abs := new(big.Int).Abs(d.value) - str := abs.String() - - var intPart, fractionalPart string - - // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN - // and you are on a 32-bit machine. Won't fix this super-edge case. - dExpInt := int(d.exp) - if len(str) > -dExpInt { - intPart = str[:len(str)+dExpInt] - fractionalPart = str[len(str)+dExpInt:] - } else { - intPart = "0" - - num0s := -dExpInt - len(str) - fractionalPart = strings.Repeat("0", num0s) + str - } - - if trimTrailingZeros { - i := len(fractionalPart) - 1 - for ; i >= 0; i-- { - if fractionalPart[i] != '0' { - break - } - } - fractionalPart = fractionalPart[:i+1] - } - - number := intPart - if len(fractionalPart) > 0 { - number += "." + fractionalPart - } - - if d.value.Sign() < 0 { - return "-" + number - } - - return number -} - -func (d *Decimal) ensureInitialized() { - if d.value == nil { - d.value = new(big.Int) - } -} - -// Min returns the smallest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Min(arr[0], arr[1:]...) -// -// This makes it harder to accidentally call Min with 0 arguments. -func Min(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) < 0 { - ans = item - } - } - return ans -} - -// Max returns the largest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Max(arr[0], arr[1:]...) 
-// -// This makes it harder to accidentally call Max with 0 arguments. -func Max(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) > 0 { - ans = item - } - } - return ans -} - -// Sum returns the combined total of the provided first and rest Decimals -func Sum(first Decimal, rest ...Decimal) Decimal { - total := first - for _, item := range rest { - total = total.Add(item) - } - - return total -} - -// Avg returns the average value of the provided first and rest Decimals -func Avg(first Decimal, rest ...Decimal) Decimal { - count := New(int64(len(rest)+1), 0) - sum := Sum(first, rest...) - return sum.Div(count) -} - -// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) -func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { - d1.ensureInitialized() - d2.ensureInitialized() - - if d1.exp == d2.exp { - return d1, d2 - } - - baseScale := min(d1.exp, d2.exp) - if baseScale != d1.exp { - return d1.rescale(baseScale), d2 - } - return d1, d2.rescale(baseScale) -} - -func min(x, y int32) int32 { - if x >= y { - return y - } - return x -} - -func unquoteIfQuoted(value interface{}) (string, error) { - var bytes []byte - - switch v := value.(type) { - case string: - bytes = []byte(v) - case []byte: - bytes = v - default: - return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", - value, value) - } - - // If the amount is quoted, strip the quotes - if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { - bytes = bytes[1 : len(bytes)-1] - } - return string(bytes), nil -} - -// NullDecimal represents a nullable decimal with compatibility for -// scanning null values from the database. -type NullDecimal struct { - Decimal Decimal - Valid bool -} - -// Scan implements the sql.Scanner interface for database deserialization. -func (d *NullDecimal) Scan(value interface{}) error { - if value == nil { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.Scan(value) -} - -// Value implements the driver.Valuer interface for database serialization. -func (d NullDecimal) Value() (driver.Value, error) { - if !d.Valid { - return nil, nil - } - return d.Decimal.Value() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.UnmarshalJSON(decimalBytes) -} - -// MarshalJSON implements the json.Marshaler interface. -func (d NullDecimal) MarshalJSON() ([]byte, error) { - if !d.Valid { - return []byte("null"), nil - } - return d.Decimal.MarshalJSON() -} - -// Trig functions - -// Atan returns the arctangent, in radians, of x. 
-func (d Decimal) Atan() Decimal { - if d.Equal(NewFromFloat(0.0)) { - return d - } - if d.GreaterThan(NewFromFloat(0.0)) { - return d.satan() - } - return d.Neg().satan().Neg() -} - -func (d Decimal) xatan() Decimal { - P0 := NewFromFloat(-8.750608600031904122785e-01) - P1 := NewFromFloat(-1.615753718733365076637e+01) - P2 := NewFromFloat(-7.500855792314704667340e+01) - P3 := NewFromFloat(-1.228866684490136173410e+02) - P4 := NewFromFloat(-6.485021904942025371773e+01) - Q0 := NewFromFloat(2.485846490142306297962e+01) - Q1 := NewFromFloat(1.650270098316988542046e+02) - Q2 := NewFromFloat(4.328810604912902668951e+02) - Q3 := NewFromFloat(4.853903996359136964868e+02) - Q4 := NewFromFloat(1.945506571482613964425e+02) - z := d.Mul(d) - b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z) - b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4) - z = b1.Div(b2) - z = d.Mul(z).Add(d) - return z -} - -// satan reduces its argument (known to be positive) -// to the range [0, 0.66] and calls xatan. -func (d Decimal) satan() Decimal { - Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits - Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8) - pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459) - - if d.LessThanOrEqual(NewFromFloat(0.66)) { - return d.xatan() - } - if d.GreaterThan(Tan3pio8) { - return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits) - } - return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits)) -} - -// sin coefficients -var _sin = [...]Decimal{ - NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd - NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d - NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1 - NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03 - NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0 - NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548 -} - -// Sin returns the sine of the radian argument x. 
-func (d Decimal) Sin() Decimal { - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } else { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } - if sign { - y = y.Neg() - } - return y -} - -// cos coefficients -var _cos = [...]Decimal{ - NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b - NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 - NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 - NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 - NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 - NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b -} - -// Cos returns the cosine of the radian argument x. 
-func (d Decimal) Cos() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - // make argument positive - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - if j > 1 { - sign = !sign - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } else { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } - if sign { - y = y.Neg() - } - return y -} - -var _tanP = [...]Decimal{ - NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 - NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd - NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 -} -var _tanQ = [...]Decimal{ - NewFromFloat(1.00000000000000000000e+0), - NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 - NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 - NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef - NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 -} - -// Tan returns the tangent of the radian argument x. 
-func (d Decimal) Tan() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if zz.GreaterThan(NewFromFloat(1e-14)) { - w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) - x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) - y = z.Add(z.Mul(w.Div(x))) - } else { - y = z - } - if j&2 == 2 { - y = NewFromFloat(-1.0).Div(y) - } - if sign { - y = y.Neg() - } - return y -} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go deleted file mode 100644 index 8008f55c..00000000 --- a/vendor/github.com/shopspring/decimal/rounding.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type floatInfo struct { - mantbits uint - expbits uint - bias int -} - -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} - -// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits -// that will let the original floating point value be precisely reconstructed. -func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { - // If mantissa is zero, the number is zero; stop now. - if mant == 0 { - d.nd = 0 - return - } - - // Compute upper and lower such that any decimal number - // between upper and lower (possibly inclusive) - // will round to the original floating point number. - - // We may see at once that the number is already shortest. - // - // Suppose d is not denormal, so that 2^exp <= d < 10^dp. - // The closest shorter number is at least 10^(dp-nd) away. - // The lower/upper bounds computed below are at distance - // at most 2^(exp-mantbits). - // - // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), - // or equivalently log2(10)*(dp-nd) > exp-mantbits. - // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). - minexp := flt.bias + 1 // minimum possible exponent - if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { - // The number is already shortest. - return - } - - // d = mant << (exp - mantbits) - // Next highest floating point number is mant+1 << exp-mantbits. - // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
- upper := new(decimal) - upper.Assign(mant*2 + 1) - upper.Shift(exp - int(flt.mantbits) - 1) - - // d = mant << (exp - mantbits) - // Next lowest floating point number is mant-1 << exp-mantbits, - // unless mant-1 drops the significant bit and exp is not the minimum exp, - // in which case the next lowest is mant*2-1 << exp-mantbits-1. - // Either way, call it mantlo << explo-mantbits. - // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. - var mantlo uint64 - var explo int - if mant > 1<_ACTIVE_HELP where is the name of the root command in upper -// case, with all - replaced by _. +// case, with all non-ASCII-alphanumeric characters replaced by `_`. // It will always return "0" if the global environment variable COBRA_ACTIVE_HELP // is set to "0". func GetActiveHelpConfig(cmd *Command) string { @@ -55,9 +58,10 @@ func GetActiveHelpConfig(cmd *Command) string { // activeHelpEnvVar returns the name of the program-specific ActiveHelp environment // variable. It has the format _ACTIVE_HELP where is the name of the -// root command in upper case, with all - replaced by _. +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { // This format should not be changed: users will be using it explicitly. activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - return strings.ReplaceAll(activeHelpEnvVar, "-", "_") + activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") + return activeHelpEnvVar } diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md deleted file mode 100644 index 5e7f59af..00000000 --- a/vendor/github.com/spf13/cobra/active_help.md +++ /dev/null @@ -1,157 +0,0 @@ -# Active Help - -Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. - -For example, -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding. - -bash-5.1$ bin/helm package [tab] -Please specify the path to the chart to package - -bash-5.1$ bin/helm package [tab][tab] -bin/ internal/ scripts/ pkg/ testdata/ -``` - -**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. -## Supported shells - -Active Help is currently only supported for the following shells: -- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. -- Zsh - -## Adding Active Help messages - -As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). - -Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. 
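Relating this to the `activeHelpEnvVar` hunk above: a rough sketch of the broadened naming rule described in the updated doc comment. The exact substitution pattern added by the diff is not visible here, so the regexp below is an assumption.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed equivalent of the new prefix sanitization: uppercase the program
// name, append the suffix, then replace anything outside [A-Z0-9_] with "_".
var subst = regexp.MustCompile(`[^A-Z0-9_]`)

func activeHelpEnvVarName(program string) string {
	return subst.ReplaceAllString(strings.ToUpper(program)+"_ACTIVE_HELP", "_")
}

func main() {
	fmt.Println(activeHelpEnvVarName("helm"))      // HELM_ACTIVE_HELP
	fmt.Println(activeHelpEnvVarName("my-cli"))    // MY_CLI_ACTIVE_HELP (the old rule also handled "-")
	fmt.Println(activeHelpEnvVarName("my.cli v2")) // MY_CLI_V2_ACTIVE_HELP (only the new rule handles "." and " ")
}
```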
- -### Active Help for nouns - -Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: - -```go -cmd := &cobra.Command{ - Use: "add [NAME] [URL]", - Short: "add a chart repository", - Args: require.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return addRepo(args) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var comps []string - if len(args) == 0 { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } else if len(args) == 1 { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } else { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - return comps, cobra.ShellCompDirectiveNoFileComp - }, -} -``` -The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior: -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding - -bash-5.1$ helm repo add grafana [tab] -You must specify the URL for the repo you are adding - -bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] -This command does not take any more arguments -``` -**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. - -### Active Help for flags - -Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example: -```go -_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 2 { - return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp - } - return compVersionFlag(args[1], toComplete) - }) -``` -The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. -``` -bash-5.1$ bin/helm install myrelease --version 2.0.[tab] -You must first specify the chart to install before the --version flag can be completed - -bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] -2.0.1 2.0.2 2.0.3 -``` - -## User control of Active Help - -You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. -Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. - -The way to configure Active Help is to use the program's Active Help environment -variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your -program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever -Active Help configuration values are supported by the program. - -For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`.
Then a user -would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. - -For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the -Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages -should or should not be added (instead of reading the environment variable directly). - -For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - activeHelpLevel := cobra.GetActiveHelpConfig(cmd) - - var comps []string - if len(args) == 0 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } - } else if len(args) == 1 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } - } else { - if activeHelpLevel == "local" { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - } - return comps, cobra.ShellCompDirectiveNoFileComp -}, -``` -**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. - -**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to. - -**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. -## Active Help with Cobra's default completion command - -Cobra provides a default `completion` command for programs that wish to use it. -When using the default `completion` command, Active Help is configurable in the same -fashion as described above using environment variables. You may wish to document this in more -details for your users. - -## Debugging Active Help - -Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. - -When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where any `-` is replaced by an `_`.
For example, we can test deactivating some Active Help as shown below: -``` -$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -_activeHelp_ WARNING: cannot re-use a name that is still in use -:0 -Completion ended with directive: ShellCompDirectiveDefault - -$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -:0 -Completion ended with directive: ShellCompDirectiveDefault -``` diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 10c78847..8a531518 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -85,7 +85,7 @@ __%[1]s_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2f..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. 
- -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 19b09560..1cce5c32 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -57,7 +57,7 @@ __%[1]s_get_completion_results() { local requestComp lastParam lastChar args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") requestComp="${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index b07b44a0..a6b160ce 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -43,12 +43,13 @@ var initializers []func() var finalizers []func() const ( - defaultPrefixMatching = false - defaultCommandSorting = true - defaultCaseInsensitive = false + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false ) -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing // to automatically enable in CLI tools. // Set this to true to enable it. var EnablePrefixMatching = defaultPrefixMatching @@ -60,6 +61,10 @@ var EnableCommandSorting = defaultCommandSorting // EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default) var EnableCaseInsensitive = defaultCaseInsensitive +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. 
+// By default this is disabled, which means only the first run hook to be found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks + // MousetrapHelpText enables an information splash screen on Windows // if the CLI is started from explorer.exe. // To disable the mousetrap, just set this variable to blank string (""). diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 01f7c6f1..2fbe6c13 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -30,7 +30,10 @@ import ( flag "github.com/spf13/pflag" ) -const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -99,7 +102,7 @@ type Command struct { Deprecated string // Annotations are key/value pairs that can be used by applications to identify or - // group commands. + // group commands or set special options. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not @@ -115,6 +118,8 @@ type Command struct { // * PostRun() // * PersistentPostRun() // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. // // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) @@ -181,6 +186,9 @@ type Command struct { // versionTemplate is the version template defined by user. versionTemplate string + // errPrefix is the error message prefix defined by user. + errPrefix string + // inReader is a reader defined by the user that replaces stdin inReader io.Reader // outWriter is a writer defined by the user that replaces stdout @@ -346,6 +354,11 @@ func (c *Command) SetVersionTemplate(s string) { c.versionTemplate = s } +// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -595,6 +608,18 @@ func (c *Command) VersionTemplate() string { ` } +// ErrPrefix return error message prefix for the command +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -752,7 +777,9 @@ func (c *Command) findNext(next string) *Command { } if len(matches) == 1 { - return matches[0] + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. 
+ return matches[0] // #nosec G602 } return nil @@ -910,15 +937,31 @@ func (c *Command) execute(a []string) (err error) { return err } + parents := make([]*Command, 0, 5) for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. + parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook. + parents = append(parents, p) + } + } + for _, p := range parents { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } if c.PreRunE != nil { @@ -955,10 +998,14 @@ func (c *Command) execute(a []string) (err error) { if err := p.PersistentPostRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } @@ -1048,7 +1095,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(c.ErrPrefix(), err.Error()) c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err @@ -1077,7 +1124,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(cmd.ErrPrefix(), err.Error()) } // If root command has SilenceUsage flagged, @@ -1380,6 +1427,9 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } return c.Name() } @@ -1402,6 +1452,7 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. +// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index ee38c4d0..b60f6b20 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -145,6 +145,20 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman return nil } +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + // Returns a string listing the different directive enabled in the specified parameter func (d ShellCompDirective) string() string { var directives []string @@ -283,9 +297,13 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // These flags are normally added when `execute()` is called on `finalCmd`, // however, when doing completion, we don't call `finalCmd.execute()`. - // Let's add the --help and --version flag ourselves. - finalCmd.InitDefaultHelpFlag() - finalCmd.InitDefaultVersionFlag() + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. + if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } // Check if we are doing flag value completion before parsing the flags. // This is important because if we are completing a flag value, we need to also @@ -389,6 +407,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 12ca0d2b..12d61b69 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -113,7 +113,7 @@ function __%[1]s_clear_perform_completion_once_result __%[1]s_debug "" __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" set --erase __%[1]s_perform_completion_once_result - __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" end function __%[1]s_requires_order_preservation diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed12..00000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. 
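The cobra.go and command.go hunks above introduce three user-facing additions: the package-level `EnableTraverseRunHooks` switch, `Command.SetErrPrefix`/`ErrPrefix`, and the `CommandDisplayNameAnnotation` key consulted by `CommandPath()`. A hedged sketch of how a downstream CLI might opt into them (command names and messages are illustrative, not taken from this repository):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Run every parent's persistent pre/post-run hook, not just the closest
	// one (disabled by default, per the diff).
	cobra.EnableTraverseRunHooks = true

	root := &cobra.Command{
		Use: "myapp-internal",
		// CommandPath() now reports this display name for the root command.
		Annotations: map[string]string{cobra.CommandDisplayNameAnnotation: "myapp"},
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("root persistent pre-run")
		},
	}
	// Failed runs are reported as "myapp error: ..." instead of "Error: ...".
	root.SetErrPrefix("myapp error:")

	root.AddCommand(&cobra.Command{
		Use: "sync",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("sync persistent pre-run") // also runs, thanks to traversal
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return fmt.Errorf("not implemented")
		},
	})

	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}
```

Running `myapp-internal sync` would print both persistent pre-run lines before failing with the custom error prefix; with `EnableTraverseRunHooks` left at its default, only the child's hook would execute.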
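The completions.go hunk above also adds `Command.GetFlagCompletionFunc`, a read accessor for completion functions that could previously only be set through `RegisterFlagCompletionFunc`. A small sketch (the flag name and completion values are made up):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "report"}
	cmd.Flags().String("output", "table", "output format")

	_ = cmd.RegisterFlagCompletionFunc("output",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"json", "table", "spdx-json"}, cobra.ShellCompDirectiveNoFileComp
		})

	// New in this release: wrapper code can look the registered function back
	// up instead of tracking it on its own.
	if complete, ok := cmd.GetFlagCompletionFunc("output"); ok {
		comps, _ := complete(cmd, nil, "")
		fmt.Println(comps) // [json table spdx-json]
	}
}
```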
- diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index b35fde15..0671ec5f 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -24,6 +24,7 @@ import ( const ( requiredAsGroup = "cobra_annotation_required_if_others_set" + oneRequired = "cobra_annotation_one_required" mutuallyExclusive = "cobra_annotation_mutually_exclusive" ) @@ -43,6 +44,22 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { } } +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + // MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors // if the command is invoked with more than one flag from the given set of flags. func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { @@ -59,7 +76,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { } } -// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the // first error encountered. func (c *Command) ValidateFlagGroups() error { if c.DisableFlagParsing { @@ -71,15 +88,20 @@ func (c *Command) ValidateFlagGroups() error { // groupStatus format is the list of flags as a unique ID, // then a map of each flag name and whether it is set or not. groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { return err } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { return err } @@ -142,6 +164,27 @@ func validateRequiredFlagGroups(data map[string]map[string]bool) error { return nil } +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + func validateExclusiveFlagGroups(data map[string]map[string]bool) error { keys := sortedKeys(data) for _, flagList := range keys { @@ -176,6 +219,7 @@ func sortedKeys(m map[string]map[string]bool) []string { // enforceFlagGroupsForCompletion will do the following: // - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required // - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden // This allows the standard completion logic to behave appropriately for flag groups func (c *Command) enforceFlagGroupsForCompletion() { @@ -185,9 +229,11 @@ func (c *Command) enforceFlagGroupsForCompletion() { flags := c.Flags() groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) @@ -204,6 +250,26 @@ func (c *Command) enforceFlagGroupsForCompletion() { } } + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + set := 0 + + for _, isSet := range flagnameAndStatus { + if isSet { + set++ + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if set == 0 { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + // If a flag that is mutually exclusive to others is present, we hide the other // flags of that group so the shell completion does not suggest them for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 177d2755..55195193 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -47,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars { `+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -[scriptblock]$__%[2]sCompleterBlock = { +[scriptblock]${__%[2]sCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -122,7 +122,7 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[10]s=0 + ${env:%[10]s}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -279,7 +279,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, 
ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5..00000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index 8a291eb2..00000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,64 +0,0 @@ -## Projects using Cobra - -- [Allero](https://github.com/allero-io/allero) -- [Arewefastyet](https://benchmark.vitess.io) -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](https://blevesearch.com/) -- [Cilium](https://cilium.io/) -- [CloudQuery](https://github.com/cloudquery/cloudquery) -- [CockroachDB](https://www.cockroachlabs.com/) -- [Constellation](https://github.com/edgelesssys/constellation) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Datree](https://github.com/datreeio/datree) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [GitHub CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](https://github.com/gopherjs/gopherjs) -- [GoReleaser](https://goreleaser.com) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Infracost](https://github.com/infracost/infracost) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/kubescape/kubescape) -- [KubeVirt](https://github.com/kubevirt/kubevirt) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Mercure](https://mercure.rocks/) -- [Meroxa CLI](https://github.com/meroxa/cli) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Moldy](https://github.com/Moldy-Community/moldy) -- [Multi-gitter](https://github.com/lindell/multi-gitter) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [nFPM](https://nfpm.goreleaser.com) -- [Okteto](https://github.com/okteto/okteto) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pixie](https://github.com/pixie-io/pixie) -- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Pulumi](https://www.pulumi.com) -- [QRcp](https://github.com/claudiodangelis/qrcp) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) -- [Sia](https://github.com/SiaFoundation/siad) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch 
CLI](https://github.com/twitchdev/twitch-cli) -- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) -- [Vitess](https://vitess.io) -- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) -- [Werf](https://werf.io/) -- [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 065c0621..00000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,576 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) - -If you are using the `cobra-cli` generator, -which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), -you can create a completion command by running - -```bash -cobra-cli add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: fmt.Sprintf(`To load completions: - -Bash: - - $ source <(%[1]s completion bash) - - # To load completions for each session, execute once: - # Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s - # macOS: - $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ %[1]s completion fish | source - - # To load completions for each session, execute once: - $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -PowerShell: - - PS> %[1]s completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> %[1]s completion powershell > %[1]s.ps1 - # and source this file from your PowerShell profile. -`,cmd.Root().Name()), - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. 
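Stepping back to the flag_groups.go hunk earlier in this diff: the new `MarkFlagsOneRequired` complements the existing required-together and mutually-exclusive groups. A hedged sketch with made-up flag names:

```go
package main

import "github.com/spf13/cobra"

func newScanCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:  "scan",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("image", "", "scan a container image")
	cmd.Flags().String("dir", "", "scan a directory")

	// At least one of --image/--dir must be provided...
	cmd.MarkFlagsOneRequired("image", "dir")
	// ...but not both at the same time.
	cmd.MarkFlagsMutuallyExclusive("image", "dir")
	return cmd
}

func main() {
	_ = newScanCmd().Execute()
}
```

Invoking `scan` with neither flag now fails with "at least one of the flags in the group [image dir] is required", matching the validation added in `validateOneRequiredFlagGroups`.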
- -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra to mark the default `completion` command as *hidden*: -``` -rootCmd.CompletionOptions.HiddenDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs = []string{ "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
-Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) { - RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs - -// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order -// in which the completions are provided -ShellCompDirectiveKeepOrder -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. 
To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) { -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. 
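The note above recommends the cobra debug helpers over stdout printouts; here is a minimal sketch of a flag completion function doing exactly that (the flag name and values are illustrative):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "deploy", Run: func(cmd *cobra.Command, args []string) {}}
	cmd.Flags().String("region", "", "deployment region")

	_ = cmd.RegisterFlagCompletionFunc("region",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// Written to the BASH_COMP_DEBUG_FILE log (second arg false: do not
			// echo to stderr), never to stdout, so it cannot be mistaken for a
			// completion choice.
			cobra.CompDebugln("completing --region, prefix: "+toComplete, false)
			return []string{"us-east-1", "eu-west-1"}, cobra.ShellCompDirectiveNoFileComp
		})

	_ = cmd.Execute()
}
```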
- -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. -For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. 
For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` - -If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: - -```bash -$ source <(helm completion bash) -$ helm completion [tab][tab] -bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) -fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) - -$ source <(helm completion bash --no-descriptions) -$ helm completion [tab][tab] -bash fish powershell zsh -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users. 
- -``` -# With descriptions -$ helm s[tab][tab] -search (search for a keyword in charts) status (display the status of the named release) -show (show information of a chart) - -# Without descriptions -$ helm s[tab][tab] -search show status -``` -**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. -## Zsh completions - -Cobra supports native zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` and be named -`_`. You will need to start a new shell for the completions to become available. - -Zsh supports descriptions for completions. Cobra will provide the description automatically, -based on usage information. Cobra provides a way to completely disable such descriptions by -using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make -this a configurable option to your users. -``` -# With descriptions -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. - * You should instead use `RegisterFlagCompletionFunc()`. - -### Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. -Please refer to [Zsh Completions](zsh_completions.md) for details. - -## fish completions - -Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. -``` -# With descriptions -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. 
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `fish`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `fish`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) - -## PowerShell completions - -Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -The script is designed to support all three PowerShell completion modes: - -* TabCompleteNext (default windows style - on each key press the next option is displayed) -* Complete (works like bash) -* MenuComplete (works like zsh) - -You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. - -Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -``` -# With descriptions and Mode 'Complete' -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. -$ helm s[tab] -search show status - -search for a keyword in charts - -# Without descriptions -$ helm s[tab] -search show status -``` -### Aliases - -You can also configure `powershell` aliases for your program and they will also support completions. - -``` -$ sal aliasname origcommand -$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -The name of the completer block variable is of the form `$__CompleterBlock` where every `-` and `:` in the program name have been replaced with `_`, to respect powershell naming syntax. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. 
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 85201d84..00000000 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,726 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra-CLI is its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at https://gohugo.io/documentation/`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra-cli", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. 
-func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigType("yaml") - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Organizing subcommands - -A command may have subcommands which in turn may have other subcommands. This is achieved by using -`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in -its own go package. - -The suggested approach is for the parent command to use `AddCommand` to add its most immediate -subcommands. For example, consider the following directory structure: - -```text -├── cmd -│   ├── root.go -│   └── sub1 -│   ├── sub1.go -│   └── sub2 -│   ├── leafA.go -│   ├── leafB.go -│   └── sub2.go -└── main.go -``` - -In this case: - -* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. -* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. -* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the - sub2 command. - -This approach ensures the subcommands are always included at compile time while avoiding cyclic -references. - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. 
- -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -### Flag Groups - -If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then -Cobra can enforce that requirement: -```go -rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") -rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") -rootCmd.MarkFlagsRequiredTogether("username", "password") -``` - -You can also prevent different flags from being provided together if they represent mutually -exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: -```go -rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") -rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") -``` - -In both of these cases: - - both local and persistent flags can be used - - **NOTE:** the group is only enforced on commands where every flag is defined - - a flag may appear in multiple groups - - a group may contain any number of flags - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field of `Command`. -The following validators are built in: - -- Number of arguments: - - `NoArgs` - report an error if there are any positional args. - - `ArbitraryArgs` - accept any number of args. - - `MinimumNArgs(int)` - report an error if less than N positional args are provided. - - `MaximumNArgs(int)` - report an error if more than N positional args are provided. - - `ExactArgs(int)` - report an error if there are not exactly N positional args. - - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`. -- Content of the arguments: - - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args. - -If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. - -Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks. -For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional -args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as -shown below: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`. -For example: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return err - } - // Run the custom validation logic - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra-cli help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra-cli [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Grouping commands in help - -Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly -defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element -of that subcommand. The groups will appear in the help output in the same order as they are defined using different -calls to `AddGroup()`. 
If you use the generated `help` or `completion` commands, you can set their group ids using -`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with the following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra-cli --invalid - Error: unknown flag: --invalid - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands but for which -you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). - -## Providing Active Help - -Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff6178..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. 
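For illustration only (this sketch is not part of the vendored document being removed above), the deprecated zsh-specific markers in the summary just listed map onto `ValidArgsFunction` roughly as follows; the command name and file extensions are made up, and the usual `github.com/spf13/cobra` import is assumed:

```go
cmd := &cobra.Command{
	Use: "process [file]",
	// Rough replacement for the deprecated
	// cmd.MarkZshCompPositionalArgumentFile(1, "*.yaml", "*.yml") and
	// cmd.MarkZshCompPositionalArgumentWords(...) calls.
	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		if len(args) == 0 {
			// Complete the first positional argument with .yaml/.yml files only.
			return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
		}
		// No completion for any further positional arguments.
		return nil, cobra.ShellCompDirectiveNoFileComp
	},
}
```

Note that `ShellCompDirectiveFilterFileExt` takes bare extensions (`"yaml"`, `"yml"`) rather than zsh glob patterns, matching the bash-compatible syntax described in the flag-value completion table that follows.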
- -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go index a014ac92..063e7784 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego package argon2 diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index b2cc0515..f3b653a1 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go index 167c59d2..16d58c65 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package argon2 diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index 5577c0f9..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. 
-type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// ErrPasswordTooLong is returned when the password passed to -// GenerateFromPassword is too long (i.e. > 72 bytes). -var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -// GenerateFromPassword does not accept passwords longer than 72 bytes, which -// is the longest password bcrypt will operate on. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - if len(password) > 72 { - return nil, ErrPasswordTooLong - } - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. 
-func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. - ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. 
-func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 56bfaaa1..4f506f87 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 4b9daa18..353bb7ca 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index 5fa1b328..1d0770ab 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 && amd64 && gc && !purego -// +build !go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index ae75eb9a..adfac00c 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index b0137cdf..6e28668c 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index 9d863396..d9fcac3a 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package blake2b diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 5dfacbb9..661ea132 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index f1f66230..7dd2638e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 02ff3d05..db42e667 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!arm64 && !s390x && !ppc64le) || !gc || purego -// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index da420b2e..3a4287f9 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 5c0fed26..66aebae2 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -20,7 +20,6 @@ // due to the calling conventions and initialization of constants. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index 4652247b..683ccfd1 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index f3ef5a01..1eda91a3 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "go_asm.h" #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go index edcf163c..70c54169 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -1,7 +1,6 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s index 293f013c..60817acc 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s @@ -1,7 +1,6 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. 
//go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go index ddb6c9b8..9da280d1 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || !gc || purego -// +build !amd64 !gc purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go index af459ef5..075fe9b9 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && gc && !purego -// +build arm64,gc,!purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s index 5c91e458..3126a434 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && gc && !purego -// +build arm64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go index 234a5b2e..fc029ac1 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !arm64 || !gc || purego -// +build !arm64 !gc purego package field diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index dda3f143..f4ded5fe 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) { // Fill the rest of the buffer for len(p) > 0 { - f.expander.Reset() + if f.counter > 1 { + f.expander.Reset() + } f.expander.Write(f.prev) f.expander.Write(f.info) f.expander.Write([]byte{f.counter}) diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go index 69c17f82..551ff0c3 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !purego -// +build !purego // Package alias implements memory aliasing tests. package alias diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go index 4775b0a4..6fe61b5c 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build purego -// +build purego // Package alias implements memory aliasing tests. 
package alias diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go index 45b5c966..d33c8890 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.13 -// +build !go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go index ed52b341..495c1fa6 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.13 -// +build go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index f184b67d..333da285 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go index 6d522333..164cd47d 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index 1d74f0f8..e0d3c647 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go index 4a069941..4aec4874 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index 58422aad..d2ca5dee 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index ec959668..e1d033a4 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s index aa9e0494..0fe3a7c2 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 904b57e0..00000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go deleted file mode 100644 index c971a99f..00000000 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scrypt implements the scrypt key derivation function as defined in -// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard -// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" - -import ( - "crypto/sha256" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/pbkdf2" -) - -const maxInt = int(^uint(0) >> 1) - -// blockCopy copies n numbers from src into dst. -func blockCopy(dst, src []uint32, n int) { - copy(dst, src[:n]) -} - -// blockXOR XORs numbers from dst with n numbers from src. -func blockXOR(dst, src []uint32, n int) { - for i, v := range src[:n] { - dst[i] ^= v - } -} - -// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -// and puts the result into both tmp and out. -func salsaXOR(tmp *[16]uint32, in, out []uint32) { - w0 := tmp[0] ^ in[0] - w1 := tmp[1] ^ in[1] - w2 := tmp[2] ^ in[2] - w3 := tmp[3] ^ in[3] - w4 := tmp[4] ^ in[4] - w5 := tmp[5] ^ in[5] - w6 := tmp[6] ^ in[6] - w7 := tmp[7] ^ in[7] - w8 := tmp[8] ^ in[8] - w9 := tmp[9] ^ in[9] - w10 := tmp[10] ^ in[10] - w11 := tmp[11] ^ in[11] - w12 := tmp[12] ^ in[12] - w13 := tmp[13] ^ in[13] - w14 := tmp[14] ^ in[14] - w15 := tmp[15] ^ in[15] - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 - x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 - - for i := 0; i < 8; i += 2 { - x4 ^= bits.RotateLeft32(x0+x12, 7) - x8 ^= bits.RotateLeft32(x4+x0, 9) - x12 ^= bits.RotateLeft32(x8+x4, 13) - x0 ^= bits.RotateLeft32(x12+x8, 18) - - x9 ^= bits.RotateLeft32(x5+x1, 7) - x13 ^= bits.RotateLeft32(x9+x5, 9) - x1 ^= bits.RotateLeft32(x13+x9, 13) - x5 ^= bits.RotateLeft32(x1+x13, 18) - - x14 ^= bits.RotateLeft32(x10+x6, 7) - x2 ^= bits.RotateLeft32(x14+x10, 9) - x6 ^= bits.RotateLeft32(x2+x14, 13) - x10 ^= bits.RotateLeft32(x6+x2, 18) - - x3 ^= bits.RotateLeft32(x15+x11, 7) - x7 ^= bits.RotateLeft32(x3+x15, 9) - x11 ^= bits.RotateLeft32(x7+x3, 13) - x15 ^= bits.RotateLeft32(x11+x7, 18) - - x1 ^= bits.RotateLeft32(x0+x3, 7) - x2 ^= bits.RotateLeft32(x1+x0, 9) - x3 ^= bits.RotateLeft32(x2+x1, 13) - x0 ^= bits.RotateLeft32(x3+x2, 18) - - x6 ^= bits.RotateLeft32(x5+x4, 7) - x7 ^= bits.RotateLeft32(x6+x5, 9) - x4 ^= bits.RotateLeft32(x7+x6, 13) - x5 ^= bits.RotateLeft32(x4+x7, 18) - - x11 ^= bits.RotateLeft32(x10+x9, 7) - x8 ^= bits.RotateLeft32(x11+x10, 9) - x9 ^= bits.RotateLeft32(x8+x11, 13) - x10 ^= bits.RotateLeft32(x9+x8, 18) - - x12 ^= bits.RotateLeft32(x15+x14, 7) - x13 ^= bits.RotateLeft32(x12+x15, 9) - x14 ^= bits.RotateLeft32(x13+x12, 13) - x15 ^= bits.RotateLeft32(x14+x13, 18) - } - x0 += w0 - x1 += w1 - x2 += w2 - x3 += w3 - x4 += w4 - x5 += w5 - x6 += w6 - x7 += w7 - x8 += w8 - x9 += w9 - x10 += w10 - x11 += w11 - x12 += w12 - x13 += w13 - x14 += w14 - x15 += w15 - - out[0], tmp[0] = x0, x0 - out[1], tmp[1] = x1, x1 - out[2], tmp[2] = x2, x2 - out[3], tmp[3] = x3, x3 - out[4], tmp[4] = x4, x4 - out[5], tmp[5] = x5, x5 - out[6], tmp[6] = x6, x6 - out[7], tmp[7] = x7, x7 - out[8], tmp[8] = x8, x8 - out[9], tmp[9] = x9, x9 - out[10], tmp[10] = x10, x10 - out[11], tmp[11] = x11, x11 - out[12], tmp[12] = x12, x12 - out[13], tmp[13] = x13, x13 - out[14], tmp[14] = x14, x14 - out[15], tmp[15] = x15, x15 -} - -func blockMix(tmp *[16]uint32, in, out []uint32, r int) { - blockCopy(tmp[:], in[(2*r-1)*16:], 16) - for i := 0; i < 2*r; i += 2 { - salsaXOR(tmp, 
in[i*16:], out[i*8:]) - salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) - } -} - -func integer(b []uint32, r int) uint64 { - j := (2*r - 1) * 16 - return uint64(b[j]) | uint64(b[j+1])<<32 -} - -func smix(b []byte, r, N int, v, xy []uint32) { - var tmp [16]uint32 - R := 32 * r - x := xy - y := xy[R:] - - j := 0 - for i := 0; i < R; i++ { - x[i] = binary.LittleEndian.Uint32(b[j:]) - j += 4 - } - for i := 0; i < N; i += 2 { - blockCopy(v[i*R:], x, R) - blockMix(&tmp, x, y, r) - - blockCopy(v[(i+1)*R:], y, R) - blockMix(&tmp, y, x, r) - } - for i := 0; i < N; i += 2 { - j := int(integer(x, r) & uint64(N-1)) - blockXOR(x, v[j*R:], R) - blockMix(&tmp, x, y, r) - - j = int(integer(y, r) & uint64(N-1)) - blockXOR(y, v[j*R:], R) - blockMix(&tmp, y, x, r) - } - j = 0 - for _, v := range x[:R] { - binary.LittleEndian.PutUint32(b[j:], v) - j += 4 - } -} - -// Key derives a key from the password, salt, and cost parameters, returning -// a byte slice of length keyLen that can be used as cryptographic key. -// -// N is a CPU/memory cost parameter, which must be a power of two greater than 1. -// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the -// limits, the function returns a nil byte slice and an error. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) -// -// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 -// and p=1. The parameters N, r, and p should be increased as memory latency and -// CPU parallelism increases; consider setting N to the highest power of 2 you -// can derive within 100 milliseconds. Remember to get a good random salt. -func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { - if N <= 1 || N&(N-1) != 0 { - return nil, errors.New("scrypt: N must be > 1 and a power of 2") - } - if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { - return nil, errors.New("scrypt: parameters are too large") - } - - xy := make([]uint32, 64*r) - v := make([]uint32, 32*N*r) - b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) - - for i := 0; i < p; i++ { - smix(b[i*128*r:], r, N, v, xy) - } - - return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil -} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index c74fc20f..fe8c8479 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x -// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index e5faa375..ce48b1dd 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index 248a3824..b908696b 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && !purego && gc -// +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 4cfa5438..8fb26aeb 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && !purego && gc -// +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public // domain sources at https://github.com/gvanas/KeccakCodePackage diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index 8b4453aa..addfd504 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.4 -// +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index ec26f147..d861bca5 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s index a0e051b0..826b862c 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 5c0710ef..8d31cf5b 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !gc || purego || !s390x -// +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 59c8eb94..7337cca8 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!amd64 && !386 && !ppc64le) || purego -// +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 1ce60624..870e2d16 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (amd64 || 386 || ppc64le) && !purego -// +build amd64 386 ppc64le -// +build !purego package sha3 diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index 9f09aae7..fecba8eb 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -141,9 +141,14 @@ const ( agentAddSmartcardKeyConstrained = 26 // 3.7 Key constraint identifiers - agentConstrainLifetime = 1 - agentConstrainConfirm = 2 - agentConstrainExtension = 3 + agentConstrainLifetime = 1 + agentConstrainConfirm = 2 + // Constraint extension identifier up to version 2 of the protocol. 
A + // backward incompatible change will be required if we want to add support + // for SSH_AGENT_CONSTRAIN_MAXSIGN which uses the same ID. + agentConstrainExtensionV00 = 3 + // Constraint extension identifier in version 3 and later of the protocol. + agentConstrainExtension = 255 ) // maxAgentResponseBytes is the maximum agent reply size that is accepted. This @@ -205,7 +210,7 @@ type constrainLifetimeAgentMsg struct { } type constrainExtensionAgentMsg struct { - ExtensionName string `sshtype:"3"` + ExtensionName string `sshtype:"255|3"` ExtensionDetails []byte // Rest is a field used for parsing, not part of message diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go index dd2e0a3e..e35ca7ce 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/server.go +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go @@ -208,7 +208,7 @@ func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse case agentConstrainConfirm: confirmBeforeUse = true constraints = constraints[1:] - case agentConstrainExtension: + case agentConstrainExtension, agentConstrainExtensionV00: var msg constrainExtensionAgentMsg if err = ssh.Unmarshal(constraints, &msg); err != nil { return 0, false, nil, err diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index b419c761..dd2ab0d6 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "math" - "strings" "sync" _ "crypto/sha1" @@ -140,8 +139,6 @@ var supportedPubKeyAuthAlgos = []string{ KeyAlgoDSA, } -var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",") - // unexpectedMessageError results when the SSH message that we received didn't // match what we wanted. func unexpectedMessageError(expected, got uint8) error { diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 70a7369f..49bbba76 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -11,6 +11,7 @@ import ( "io" "log" "net" + "strings" "sync" ) @@ -50,6 +51,10 @@ type handshakeTransport struct { // connection. hostKeys []Signer + // publicKeyAuthAlgorithms is non-empty if we are the server. In that case, + // it contains the supported client public key authentication algorithms. + publicKeyAuthAlgorithms []string + // hostKeyAlgorithms is non-empty if we are the client. In that case, // we accept these key types from the server as host key. hostKeyAlgorithms []string @@ -141,6 +146,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) t.hostKeys = config.hostKeys + t.publicKeyAuthAlgorithms = config.PublicKeyAuthAlgorithms go t.readLoop() go t.kexLoop() return t @@ -649,6 +655,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { // message with the server-sig-algs extension if the client supports it. See // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. 
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { + supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",") extInfo := &extInfoMsg{ NumExtensions: 2, Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1), diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index ef1bad73..df4ebdad 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -1232,16 +1232,27 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) } + var result interface{} + switch block.Type { case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) + result, err = x509.ParsePKCS1PrivateKey(buf) case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) + result, err = x509.ParseECPrivateKey(buf) case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) + result, err = ParseDSAPrivateKey(buf) default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + err = fmt.Errorf("ssh: unsupported key type %q", block.Type) } + // Because of deficiencies in the format, DecryptPEMBlock does not always + // detect an incorrect password. In these cases decrypted DER bytes is + // random noise. If the parsing of the key returns an asn1.StructuralError + // we return x509.IncorrectPasswordError. + if _, ok := err.(asn1.StructuralError); ok { + return nil, x509.IncorrectPasswordError + } + + return result, err } // ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 727c71b9..8f1505af 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -64,6 +64,13 @@ type ServerConfig struct { // Config contains configuration shared between client and server. Config + // PublicKeyAuthAlgorithms specifies the supported client public key + // authentication algorithms. Note that this should not include certificate + // types since those use the underlying algorithm. This list is sent to the + // client if it supports the server-sig-algs extension. Order is irrelevant. + // If unspecified then a default set of algorithms is used. + PublicKeyAuthAlgorithms []string + hostKeys []Signer // NoClientAuth is true if clients are allowed to connect without @@ -201,6 +208,15 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha if fullConf.MaxAuthTries == 0 { fullConf.MaxAuthTries = 6 } + if len(fullConf.PublicKeyAuthAlgorithms) == 0 { + fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos + } else { + for _, algo := range fullConf.PublicKeyAuthAlgorithms { + if !contains(supportedPubKeyAuthAlgos, algo) { + return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) + } + } + } // Check if the config contains any unsupported key exchanges for _, kex := range fullConf.KeyExchanges { if _, ok := serverForbiddenKexAlgos[kex]; ok { @@ -524,7 +540,7 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } algo := string(algoBytes) - if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) { + if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) break } @@ -591,7 +607,7 @@ userAuthLoop: // algorithm name that corresponds to algo with // sig.Format. 
This is usually the same, but // for certs, the names differ. - if !contains(supportedPubKeyAuthAlgos, sig.Format) { + if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index e0869fa3..35fd1f53 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -542,7 +542,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V if strings.Contains(ns, "@") { return nil, errorf("replacement module must match format 'path version', not 'path@version'") } - return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)") } if filepath.Separator == '/' && strings.Contains(ns, `\`) { return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") @@ -555,7 +555,6 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V } if IsDirectoryPath(ns) { return nil, errorf("replacement module directory path %q cannot have version", ns) - } } return &Replace{ @@ -679,14 +678,15 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, } } -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component // and rooted paths are directory paths; the rest are module paths. func IsDirectoryPath(ns string) bool { // Because go.mod files can move from one system to another, // we check all known path syntaxes, both Unix and Windows. - return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' } diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 2cb9c408..0c1b8679 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 -// +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index 64d31ecc..e31e35a9 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 7b6b6851..065ff3df 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.7 -// +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index 1f971534..ec5a6380 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.9 -// +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8d..e6f55cbd 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b03..00000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab..00000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b3..00000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa81..00000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7..00000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. 
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c9..00000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b..ae94c640 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. 
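Note (editor's sketch, not part of the patch): the http2/server.go hunk above drops the dirty flag, so handlerDone now always returns the responseWriterState to responseWriterStatePool. The minimal Go program below illustrates that reset-and-recycle sync.Pool pattern in isolation; the respState type and its fields are invented for the example and are not the http2 package's own.

package main

import (
	"fmt"
	"sync"
)

// respState stands in for per-response scratch state that is worth pooling
// (hypothetical type; not the http2 responseWriterState).
type respState struct {
	wroteHeader bool
	wroteBytes  int64
}

var respStatePool = sync.Pool{
	New: func() interface{} { return new(respState) },
}

// handle borrows a state from the pool, uses it, zeroes it, and puts it
// back unconditionally, mirroring the simplified handlerDone above.
func handle(n int64) {
	rws := respStatePool.Get().(*respState)
	rws.wroteHeader = true
	rws.wroteBytes += n
	fmt.Println("wrote", rws.wroteBytes, "bytes")

	*rws = respState{} // reset before recycling
	respStatePool.Put(rws)
}

func main() {
	for i := int64(1); i <= 3; i++ {
		handle(i)
	}
}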
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c..df578b86 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338d..712f1ad8 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85f..7b371788 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698ce..cc6a892a 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1..40e74bb3 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef4..c6c2bf10 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba7..76789393 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bc..0600cd2a 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701ead..2fb768ef 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778..5ff05fe1 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b533..0f25e84c 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904..8a75b966 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc..fa45bb90 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 8aaeef54..9bf0c32e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index c61f95a0..fcb9a388 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index ccf542a7..a8acd3e3 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index 0af2f248..c8ae6ddc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index fa7cdb9b..910728fb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 2aff3189..7f194678 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go index 4bfbda61..9526d2ce 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index 6cc73109..3f73a05d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo #include #include diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 863d415a..99c60fe9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 159a686f..743eb543 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !386 && !amd64 && !amd64p32 && !arm64 -// +build !386,!amd64,!amd64p32,!arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index 6000db4c..4686c1d5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index f4992b1a..cd63e733 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 021356d6..197188e6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 0f57b05b..55863585 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 -// +build loong64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index f4063c66..fedb00cc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build mips64 || mips64le -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index 07c4e36d..ffb4ec7e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build mips || mipsle -// +build mips mipsle package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go index d7b4fb4c..e9ecf2a4 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && arm -// +build !linux,arm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f3cde129..5341e7f8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go index 0dafe964..5f8f2419 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && (mips64 || mips64le) -// +build !linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go index 060d46b6..89608fba 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !linux && (ppc64 || ppc64le) -// +build !aix -// +build !linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go index dd10eb79..5ab87808 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && riscv64 -// +build !linux,riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go index 4e8acd16..c14f12b1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 || ppc64le -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index ff7da60e..7f0c79c0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 -// +build riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s index 96f81e20..1fb4b701 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 7747d888..384787ea 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build wasm -// +build wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 2dcde828..c29f5e4c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 || amd64 || amd64p32 -// +build 386 amd64 amd64p32 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s index 39acab2f..7d7ba33e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go index 93ce03a3..7fe04b0a 100644 --- a/vendor/golang.org/x/sys/cpu/endian_big.go +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go index 55db853e..48eccc4c 100644 --- a/vendor/golang.org/x/sys/cpu/endian_little.go +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go index d87bd6b3..4cd64c70 100644 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 -// +build linux,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go index b975ea2a..4c9788ea 100644 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.21 -// +build go1.21 package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index 96134157..1b9ccb09 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -9,7 +9,6 @@ // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo -// +build aix,gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index 904be42f..e8b6cdbe 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -7,7 +7,6 @@ // (See golang.org/issue/32102) //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package cpu diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go index 2000064a..5627d70e 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.19 -// +build !go1.19 package execabs diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index f364b341..d60ab1b4 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.19 -// +build go1.19 package execabs diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index c9b69937..73687de7 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.5 -// +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index 98bf56b7..fb945821 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.5 -// +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 62377d2f..c02d9ed3 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && race -// +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index f8da3087..7b15e15f 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && !race -// +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 55fa8d02..ba3e8ff8 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 67e5b011..d631fd66 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 // Package plan9 contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 3f40b9bd..f780d5c8 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && 386 -// +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 0e6a96aa..7de61065 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && amd64 -// +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 244c501b..ea85780f 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build plan9 && arm -// +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c10..e7d3df4b 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3..a4fcef0e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349..1e63615c 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4ad..6496c310 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a739..4fd1f54d 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a848..42f7eb9e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019e..f8902667 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d0..3b473487 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43..67e29f31 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d514..d6ae269c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae0276..01e5e253 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 56535728..2abf12f6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2c..f84bae71 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c..f08f6280 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d49893..bdfc024d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d..2e8c9961 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab33..2c394b11 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169..fab586a2 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c..f949ec54 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e185..2f67ba86 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb..a0865789 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a3965..6fb7cb77 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a99850..d7851346 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. 
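Note (editor's sketch, not part of the patch): the bulk of the x/sys hunks in this stretch are one mechanical cleanup — the legacy // +build lines are deleted and only the //go:build expressions (introduced in Go 1.17) are kept. The short program below is illustrative only; it uses go/build/constraint to check that a kept //go:build expression and the deleted // +build lines it replaces select the same builds. The tag set is a made-up example.

package main

import (
	"fmt"
	"go/build/constraint"
)

func main() {
	// Hypothetical target: linux/mips64 built with the gc toolchain.
	tags := map[string]bool{"linux": true, "mips64": true, "gc": true}
	ok := func(tag string) bool { return tags[tag] }

	// New-style constraint, as kept by the diff (see asm_linux_mips64x.s above).
	goBuild, err := constraint.Parse("//go:build linux && (mips64 || mips64le) && gc")
	if err != nil {
		panic(err)
	}

	// Legacy lines, as deleted by the diff; multiple +build lines are ANDed.
	legacy := []string{
		"// +build linux",
		"// +build mips64 mips64le",
		"// +build gc",
	}
	legacyOK := true
	for _, line := range legacy {
		expr, err := constraint.Parse(line)
		if err != nil {
			panic(err)
		}
		legacyOK = legacyOK && expr.Eval(ok)
	}

	fmt.Println("//go:build form selects this build:", goBuild.Eval(ok))
	fmt.Println("legacy +build form selects this build:", legacyOK)
}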
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0..623a5e69 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a..bb6a64fe 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f977..1ebf1178 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a5202655..1095fd31 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4a..b9f0e277 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d1..a96da71f 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e0..7753fdde 100644 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b99125..58c6bfc7 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808..13b4acd5 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94..9e83d18c 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go index e377cc9f..c8bde601 100644 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d7..aca5721d 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c54..d468b7b4 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3..972d61bd 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a51..848840ae 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580..5b0759bd 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1..20f470b9 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf7..c8b2a750 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7e..cbe24150 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -663,7 +663,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca051363..4b68e597 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa..fd45fe52 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5..4d0a3430 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b..6a09af53 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). 
func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. 
+ if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff..3f0975f3 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a0..a4d35db5 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec..714d2aae 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322..4a9f6634 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d625756..dbd2b6cc 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c..130398b6 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f..c3a62dbb 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a..4a1eab37 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
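The pledge_openbsd.go hunk above simplifies the wrappers: Pledge, PledgePromises and PledgeExecpromises now route through the generated libc-backed pledge stub (declared later in this diff in syscall_openbsd.go) instead of issuing SYS_PLEDGE themselves, and pledgeAvailable now just requires OpenBSD 6.4 or newer instead of tracking which release introduced execpromises. A minimal usage sketch of the exported API, assuming an OpenBSD 6.4+ target; the promise strings and file path are illustrative:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Restrict the process to stdio plus read-only filesystem access;
	// on OpenBSD older than 6.4 this now fails up front with an error.
	if err := unix.Pledge("stdio rpath", "stdio"); err != nil {
		log.Fatalf("pledge: %v", err)
	}

	// Reads are still allowed under "rpath" (the path is only an example).
	data, err := os.ReadFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(data))
}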
//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c838..5ea74da9 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cda..67ce6cef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa9..1fdaa476 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0..c87f9a9f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de51..6f328e3a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce..0eaecf5f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec996..f36c6707 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641..16dc6993 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d3212..14bab6b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da5100..3967bca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c48..eff19ada 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093..4f24b517 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0..ac30759e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b3..aab725ca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd467..ba46651f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e..df89f9e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a..a863f705 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e5022..a5e1c10e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -417,7 +417,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -2482,3 +2483,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945e..506dafa7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6..38d55641 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce3..d557cf8d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3a..facdb83b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
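Two functional changes ride along in the syscall_linux.go hunk above. First, SockaddrUnix now treats a leading NUL byte the same as the documented '@' prefix when building an abstract-namespace address (the sl > 3 guard keeps unnamed sockets behaving as before). Second, a //sys stub is added for the cachestat(2) syscall found in recent Linux kernels, generating a Cachestat wrapper. A hedged sketch of the abstract-socket behaviour; the socket name is illustrative:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// "@demo-socket" and "\x00demo-socket" now produce the same abstract
	// address; neither creates an entry on the filesystem.
	if err := unix.Bind(fd, &unix.SockaddrUnix{Name: "@demo-socket"}); err != nil {
		log.Fatal(err)
	}
}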
//go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da298641..cd2dd797 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689..cf2ee6c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7..ffc4c2b6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb48..9ebfdcf4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fcc..5f2b57c4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af24..d1a3ad82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a1229..f2f67423 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec..3d0e9845 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d2..70963a95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ff..c218ebd2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a..e6c48500 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa257..7286a9aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee1..6f5a2889 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5..66f31210 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae..11d1f169 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282..7a5eb574 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e..62d8957a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f..ce6a0688 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd8..d46d689d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b..d2882ee0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -326,4 +321,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdc..9ddc89f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360..70a3c96e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
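In the syscall_openbsd.go hunk above, Getfsstat stops issuing SYS_GETFSSTAT directly and instead calls a new generated getfsstat stub; //sys declarations for pledge and unveil are added alongside it for the wrappers elsewhere in this diff. The exported signature is unchanged. A sketch of the usual call pattern, sizing with a nil slice and then filling, assuming the OpenBSD Statfs_t layout and the MNT_NOWAIT flag:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// With a nil slice, Getfsstat only reports how many filesystems are mounted.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		log.Fatal(err)
	}

	buf := make([]unix.Statfs_t, n)
	n, err = unix.Getfsstat(buf, unix.MNT_NOWAIT)
	if err != nil {
		log.Fatal(err)
	}

	for _, fs := range buf[:n] {
		// F_mntonname is a fixed-size byte array; trim it at the first NUL.
		fmt.Println(unix.ByteSliceToString(fs.F_mntonname[:]))
	}
}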
//go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d..265caa87 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99..ac4fda17 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f4..0a451e6d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139..30a308cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7f..ea954330 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa13..60c8142d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef8..e02d8cea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda270..77081de8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca5..05c95bcc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707ac..23f39b7a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041..d99d05f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437..4fcd38de 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17..79a84f18 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefd..9eb0db66 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b28..7997b190 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae7..cb7e598c 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. 
// For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. +func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f3..e1687939 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b7..2fb219d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26..b0e6f5c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 14300762..e40fa852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
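The unveil_openbsd.go hunk above mirrors the pledge change: Unveil and UnveilBlock now check supportsUnveil (OpenBSD 6.4 or newer, matching when unveil(2) appeared) and call the generated libc unveil stub rather than issuing SYS_UNVEIL. A hedged usage sketch; the path and permission string are illustrative:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Make only /var/www visible to this process, read-only.
	if err := unix.Unveil("/var/www", "r"); err != nil {
		log.Fatalf("unveil: %v", err) // errors on OpenBSD older than 6.4
	}
	// Lock the view so later code cannot widen it.
	if err := unix.UnveilBlock(); err != nil {
		log.Fatal(err)
	}

	// Paths outside the unveiled set now report ENOENT.
	if _, err := os.Open("/etc/passwd"); err != nil {
		log.Printf("open outside the view failed as expected: %v", err)
	}
}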
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a74..bb02aa6c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e4..c0e0f869 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c513..6c692390 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3b..dd9163f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69de..493a2a79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2d..8b437b30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d96..67c02dd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479..9c00cbf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -481,10 +480,14 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_LINK = 0x2000 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +524,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +780,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1704,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -2275,6 +2282,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -3461,6 +3469,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3482,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a..4920821c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa512..a0c1e411 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cd..c6398556 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a..47cc62e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c..27ac4a09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d3..54694642 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0..3adb81d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f..2dfe98f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd9..f5398f84 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f..c54f152d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec..76057dc7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd..e0c3725e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1..18f2813e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f..11619d4e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c633..396d994d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d..130085df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c0..84769a1a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749..602ded00 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba19..efc0406e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474..5a6500f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2..a5aeeb97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e..0e9748a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe75..4f4449ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe3..76a363f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1..43ca0cdf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d40303..b1b8bb20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a0..d2ddd317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506..4dfd2e05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca2..586317c7 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. 
//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f7943..d7c881be 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e6..2d2de5d2 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. //go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e..5adc79fb 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d233..6ea64a3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18ad..99ee4399 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae5..b68a7836 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b0..0a87450b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e..ccb02f24 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0..1b40b997 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64..aad65fc7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d14..c0096391 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508acca..7664df74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead..ae099182 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dca..11fd5d45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 58184919..c3d2d653 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd1..c698cbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec7..faca7a55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -2195,3 +2194,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc..4def3e9f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf8..fef2bc8b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c..a9fd76a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe..46006502 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e..c8987d26 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb..921f4306 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e..44f06782 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718..e7fa0abf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bb..8c512567 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf9..7392fd45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f4099026..41180434 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc299..40c6ce7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed..2cfe34ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde3223..61e6f070 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65b..834b8420 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5ba..e91ebc14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556ba..be28babb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917..fb587e82 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2d..d576438b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b645..88bfc288 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30..4cbeff17 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc11..b8a67b99 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b..1123f275 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb32..af50a65c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c9223140..82badae3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fd..8fb4ff36 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c9..24d7eecb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de50..f469a83e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bcea..9a498a06 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99a..c26ca2e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f7660..1f224aa4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -801,8 +801,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360..bcc920dd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b..87a79c70 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b4018946..829b87fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4..94f01123 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e04847..3a58ae81 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf8..dcb7a0eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd..db5a7bf1 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4..7be575a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a..d6e3174c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e4405447..ee97157d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fc..35c3b91d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9..5edda768 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bb..0dc9e8b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0..308ddf3a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d9610..418664e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b83..34d0b86d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc10..b71cf45e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc427..e32df1c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0..15ad6111 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d..fcf3ecbd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4..f56dc250 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,6 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37e..974bf246 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff8..39a2739e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f71..cf9c9d77 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3..10b7362e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a..cd4d8b4f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f498..2c0efca8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcad..a72e31d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b255..c7d1e374 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d..f4d4838c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e..b64f0e59 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d6..95711195 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc..f94e943b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761..ba0c2bc5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eb..b2aa8cd4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f..524a1b1c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6d..d59b943a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952ef..31e771d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
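The Linux syscall tables above gain SYS_FCHMODAT2 across architectures (plus SYS_MAP_SHADOW_STACK on amd64). Below is a minimal sketch of exercising the new number directly via unix.Syscall6; the (dirfd, path, mode, flags) argument order is an assumption based on the kernel's fchmodat2 interface, and only the SYS_FCHMODAT2 constant itself comes from this vendored update.

```go
//go:build linux

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// chmodNoFollow changes the mode of path without following a trailing
// symlink by calling fchmodat2 directly. The argument order is an assumption
// about the kernel interface; unix.SYS_FCHMODAT2 is the constant added in
// this update.
func chmodNoFollow(path string, mode uint32) error {
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return err
	}
	_, _, errno := unix.Syscall6(unix.SYS_FCHMODAT2,
		uintptr(unix.AT_FDCWD),
		uintptr(unsafe.Pointer(p)),
		uintptr(mode),
		uintptr(unix.AT_SYMLINK_NOFOLLOW),
		0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	if err := chmodNoFollow("/tmp/example", 0o600); err != nil {
		fmt.Println("fchmodat2:", err)
	}
}
```

This only succeeds on kernels that implement fchmodat2 (6.6+); older kernels return ENOSYS.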
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 59773381..9fd77c6c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af2918..af10af28 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a9..cc2028af 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef591..c06dd441 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01..9ddbf3e0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa2..19a6ee41 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0e..05192a78 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad4..b2e30858 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1..3e6d57ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c..3a219bdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3..091d107f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10e..28ff4ef7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b..30e405bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc4833..6cbd094a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b289..7c03b6ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb1..422107ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a8..505a12ac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975..cc986c79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b4..997bcd55 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -5883,3 +5882,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc..438a30af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c68..adceca35 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c..eeaa00a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc421..6739aa91 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc..9920ef63 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
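ztypes_linux.go above adds Cachestat_t and CachestatRange, which pair with the SYS_CACHESTAT number already present in the syscall tables. A sketch of querying page-cache residency through the raw syscall follows; the (fd, range, stat, flags) argument order and the "zero range covers the whole file" behavior are assumptions about the kernel's cachestat interface, and the sample path is illustrative only.

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

// pageCacheStats fills the new unix.Cachestat_t for a file. Only the structs
// and SYS_CACHESTAT come from the vendored update; the rest is a sketch.
func pageCacheStats(f *os.File) (unix.Cachestat_t, error) {
	var (
		cr unix.CachestatRange // zero Off/Len: whole file (assumed kernel behavior)
		cs unix.Cachestat_t
	)
	_, _, errno := unix.Syscall6(unix.SYS_CACHESTAT,
		f.Fd(),
		uintptr(unsafe.Pointer(&cr)),
		uintptr(unsafe.Pointer(&cs)),
		0, 0, 0)
	if errno != 0 {
		return cs, errno
	}
	return cs, nil
}

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	cs, err := pageCacheStats(f)
	if err != nil {
		fmt.Println("cachestat:", err)
		return
	}
	fmt.Printf("cached=%d dirty=%d writeback=%d evicted=%d\n",
		cs.Cache, cs.Dirty, cs.Writeback, cs.Evicted)
}
```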
//go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7cc..2923b799 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd33..ce2750ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77..3038811d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b875..efc6fed1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840d..9a654b75 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 03487439..40d358e3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad06704..148c6ceb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c3..72ba8154 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d02..71e76550 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6..4abbdb9d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9..f22e7947 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655..066a7d83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a1..439548ec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151..16085d3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b..afd13a3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a547988..5d97f1f9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1..34871cdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266..5911bceb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb1..e4f24f3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c01..ca50a793 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a4..d7d7f790 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c..14160576 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb..54f31be6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea6..ce2d713d 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd3..ba64caca 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645..6c366955 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c..dbcdb090 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089..0f1bdc38 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817..0c78da78 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b96..a9dc6308 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434..6a4f9ce6 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb9..e85ed6b9 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57c..fb6cfd04 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -233,6 +233,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +970,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c8..359780f6 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de..db6282e0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -253,6 +253,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -2185,6 +2186,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 
0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go index 62c2b3f4..1ad0ddfe 100644 --- a/vendor/golang.org/x/term/term_unix.go +++ b/vendor/golang.org/x/term/term_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/vendor/golang.org/x/term/term_unix_bsd.go b/vendor/golang.org/x/term/term_unix_bsd.go index 853b3d69..9dbf5462 100644 --- a/vendor/golang.org/x/term/term_unix_bsd.go +++ b/vendor/golang.org/x/term/term_unix_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package term diff --git a/vendor/golang.org/x/term/term_unix_other.go b/vendor/golang.org/x/term/term_unix_other.go index 1e8955c9..1b36de79 100644 --- a/vendor/golang.org/x/term/term_unix_other.go +++ b/vendor/golang.org/x/term/term_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || linux || solaris || zos -// +build aix linux solaris zos package term diff --git a/vendor/golang.org/x/term/term_unsupported.go b/vendor/golang.org/x/term/term_unsupported.go index f1df8506..3c409e58 100644 --- a/vendor/golang.org/x/term/term_unsupported.go +++ b/vendor/golang.org/x/term/term_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 package term diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4..784bb880 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a9200..8e1e9439 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72..d2bd7118 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
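On the Windows side, the update exports GetFileTime (mirroring the existing SetFileTime) alongside a batch of TCP/UDP socket-option constants. A small sketch of reading file timestamps with the new call; obtaining the Handle through windows.Open with O_RDONLY is an assumption about one convenient route, not part of the change itself.

```go
//go:build windows

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/windows"
)

// printFileTimes reads creation, last-access, and last-write times using the
// newly exported windows.GetFileTime.
func printFileTimes(path string) error {
	h, err := windows.Open(path, windows.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer windows.CloseHandle(h)

	var ctime, atime, wtime windows.Filetime
	if err := windows.GetFileTime(h, &ctime, &atime, &wtime); err != nil {
		return err
	}
	fmt.Println("created :", time.Unix(0, ctime.Nanoseconds()))
	fmt.Println("accessed:", time.Unix(0, atime.Nanoseconds()))
	fmt.Println("written :", time.Unix(0, wtime.Nanoseconds()))
	return nil
}

func main() {
	if err := printFileTimes(`C:\Windows\notepad.exe`); err != nil {
		fmt.Println(err)
	}
}
```

The new TCP_KEEPIDLE, TCP_KEEPINTVL, and TCP_KEEPCNT constants are meant to be passed to the existing windows.SetsockoptInt on a socket Handle for keepalive tuning.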
//go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea..f76bdca2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b..3aa2c3bd 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7be..a7137579 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce580..f15746f7 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb2..c164d379 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a07882..1af161c7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c4..eb73ecc3 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b27330..276cb8d8 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8..0cceffd7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b87..b0819e42 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae5..bf65457d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE similarity index 100% rename from vendor/github.com/Masterminds/goutils/LICENSE.txt rename to vendor/google.golang.org/genproto/googleapis/rpc/LICENSE diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a..1bc92248 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). 
### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 02f5dc53..712fef4d 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,30 +25,35 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. 
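The FAQ entry above about connections closing due to keepalive settings points at MaxConnectionAgeGrace. A hedged sketch of the server-side knobs involved; the durations are placeholders, not recommendations.

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Terminate connections periodically (for example, to trigger DNS
	// re-resolution on clients) while giving in-flight RPCs a grace period,
	// as the FAQ suggests.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}
```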
-func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -58,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -88,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -99,3 +103,39 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x any) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 09d61dd1..b6377f44 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -105,8 +105,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. 
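With the attributes API above now keyed on `any`, a value type opts into semantic comparison by implementing `Equal(o any) bool`, and the new String/MarshalJSON methods make the map printable for debugging. A small sketch with a hypothetical key and value type (regionKey and region are illustrative, not part of gRPC):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey is a private, comparable key type, as the package docs recommend.
type regionKey struct{}

// region implements Equal(o any) bool so (*Attributes).Equal compares it by
// value rather than by identity.
type region struct{ name string }

func (r region) Equal(o any) bool {
	other, ok := o.(region)
	return ok && other.name == r.name
}

func main() {
	a := attributes.New(regionKey{}, region{name: "us-east-1"})
	b := a.WithValue(regionKey{}, region{name: "us-east-1"})

	fmt.Println(a.Value(regionKey{})) // {us-east-1}
	fmt.Println(a.Equal(b))           // true: Equal(o any) is consulted
}
```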
// - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +115,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +136,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +162,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +270,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -286,7 +306,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -343,9 +363,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. 
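For balancer authors, the interface changes above replace per-balancer UpdateSubConnState and ClientConn.RemoveSubConn with a per-SubConn StateListener and SubConn.Shutdown. A hedged sketch of the new creation pattern in a custom balancer, roughly following what the base balancer adopts below; the package name and the scStates bookkeeping map are placeholders.

```go
package custombalancer

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// newSubConn registers a StateListener at creation time, so state changes
// arrive on the listener instead of Balancer.UpdateSubConnState. The SubConn
// is later retired with sc.Shutdown() rather than cc.RemoveSubConn(sc).
func newSubConn(cc balancer.ClientConn, addr resolver.Address, scStates map[balancer.SubConn]connectivity.State) (balancer.SubConn, error) {
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{
		StateListener: func(scs balancer.SubConnState) {
			if scs.ConnectivityState == connectivity.Shutdown {
				delete(scStates, sc) // final update for this SubConn
				return
			}
			scStates[sc] = scs.ConnectivityState
		},
	}
	sc, err := cc.NewSubConn([]resolver.Address{addr}, opts)
	if err != nil {
		return nil, err
	}
	scStates[sc] = connectivity.Idle
	sc.Connect()
	return sc, nil
}
```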
UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +414,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d..a7f1eeec 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 0359956d..a4411c22 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,14 +25,20 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -49,192 +55,89 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. 
+ mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update. - -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. 
- if res == nil { - return nil + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. 
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +151,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +187,112 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. 
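The wrapper above now funnels every outgoing balancer call through an internal grpcsync.CallbackSerializer instead of the old update channel plus watcher goroutine. The stand-in below is only a simplified illustration of that pattern (one goroutine, callbacks in scheduling order, scheduling failing once the context is canceled); it is not the real internal type.

package serializerexample

import "context"

type serializer struct {
	ctx   context.Context
	queue chan func(context.Context) // the real implementation uses an unbounded queue
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{ctx: ctx, queue: make(chan func(context.Context), 1024)}
	go func() {
		for {
			select {
			case cb := <-s.queue:
				cb(ctx) // callbacks run one at a time, in the order they were scheduled
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// schedule enqueues cb and reports whether the serializer is still accepting
// work; a false return is what the wrapper treats as "closed or idle".
func (s *serializer) schedule(cb func(context.Context)) bool {
	select {
	case <-s.ctx.Done():
		return false
	case s.queue <- cb:
		return true
	}
}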
-func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done() + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish before closing the balancer. + <-done + b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. 
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -308,32 +300,26 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +328,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -352,6 +342,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,78 +356,57 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa1..59548011 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d3..788c89c1 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,7 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 3a761424..ff7fea10 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -35,9 +34,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -54,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -69,6 +69,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. 
errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,17 +137,28 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. + cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) disableGlobalOpts := false for _, opt := range opts { @@ -173,40 +187,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -249,15 +236,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil { // Blocking wait for the initial service config. @@ -275,57 +259,234 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. 
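The idleness manager configured above is driven by a dial option; assuming the exported WithIdleTimeout option (the option name, target, and duration here are illustrative, and a value of 0 disables idleness per the comment above), a client would opt in roughly like this:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Enter idle mode after 30 minutes without RPC activity; the next RPC
		// (or an explicit Connect) recreates the resolver and balancer.
		grpc.WithIdleTimeout(30*time.Minute),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Kick the channel out of idle without issuing an RPC.
	conn.Connect()
}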
+func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err + } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. 
+func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
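The rules listed above map directly onto dial options; for example, pairing insecure transport credentials with per-RPC credentials that require transport security trips the last check and Dial fails. The token and target below are placeholders.

package main

import (
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	perRPC := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder"})}

	_, err := grpc.Dial(
		"localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()), // SecurityProtocol is "insecure"
		grpc.WithPerRPCCredentials(perRPC),                       // RequireTransportSecurity() returns true
	)
	if err != nil {
		log.Printf("expected dial error: %v", err)
	}
}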
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -342,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -354,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -390,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. 
type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -412,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -441,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -471,7 +648,9 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -492,11 +671,44 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -536,7 +748,10 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. 
+ cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -585,6 +800,10 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -693,6 +912,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -700,11 +933,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -798,9 +1032,6 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -819,58 +1050,63 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -961,23 +1197,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1016,46 +1242,50 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. 
func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.Close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1085,7 +1315,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1099,6 +1330,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s if lastErr == nil { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) @@ -1124,7 +1358,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1152,15 +1387,16 @@ func (ac *addrConn) resetTransport() { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. ac.mu.Lock() - if ac.state == connectivity.Shutdown { + if acCtx.Err() != nil { + // addrConn was torn down. ac.mu.Unlock() return } + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. 
@@ -1175,13 +1411,13 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1196,14 +1432,13 @@ func (ac *addrConn) resetTransport() { // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1217,7 +1452,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1234,19 +1469,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1265,7 +1501,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1282,7 +1518,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. 
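resetTransport, tryAllAddrs and createTransport above now carry a per-attempt context and test ctx.Err() instead of comparing against the Shutdown state, which is what lets updateAddrs abort an in-flight attempt by cancelling and replacing ac.ctx. A rough sketch of that cancel-and-restart shape (names are illustrative, not the actual addrConn):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// conn stands in for addrConn: it owns a context that covers the current
// connection attempt and can be replaced to abort that attempt.
type conn struct {
	mu     sync.Mutex
	ctx    context.Context
	cancel context.CancelFunc
}

// connectLoop captures the attempt context once, the way resetTransport
// captures acCtx, and checks it at every blocking point.
func (c *conn) connectLoop(addrs []string) {
	c.mu.Lock()
	ctx := c.ctx
	c.mu.Unlock()
	for _, a := range addrs {
		if ctx.Err() != nil {
			fmt.Println("attempt canceled before", a)
			return
		}
		select {
		case <-time.After(30 * time.Millisecond): // pretend to dial a
			fmt.Println("dialed", a)
		case <-ctx.Done():
			fmt.Println("attempt canceled while dialing", a)
			return
		}
	}
}

// updateAddrs cancels the current attempt, installs a fresh context, and
// starts a new connect loop, roughly like the updateAddrs hunk above.
func (c *conn) updateAddrs(parent context.Context, addrs []string) {
	c.mu.Lock()
	c.cancel()
	c.ctx, c.cancel = context.WithCancel(parent)
	c.mu.Unlock()
	go c.connectLoop(addrs)
}

func main() {
	parent := context.Background()
	ctx, cancel := context.WithCancel(parent)
	c := &conn{ctx: ctx, cancel: cancel}
	go c.connectLoop([]string{"a:1", "a:2", "a:3"})

	time.Sleep(40 * time.Millisecond)
	c.updateAddrs(parent, []string{"b:1"})
	time.Sleep(100 * time.Millisecond)
}
```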
tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1294,6 +1530,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1353,7 +1592,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1401,6 +1640,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1418,16 +1680,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1441,6 +1694,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. 
Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1552,7 +1828,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1564,7 +1847,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1579,38 +1863,98 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. 
+ for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) } // Determine channel authority. The order of precedence is as follows: // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1627,25 +1971,62 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - return endpoint, nil + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. 
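encodeAuthority above percent-encodes any byte that is not legal in an RFC 3986 authority, so that endpoints such as a/b/c can still be carried as the :authority, and determineAuthority then applies a fixed precedence: dial-option authority, then the credentials' server name, then the unix and ":port" special cases, then the escaped endpoint. A standalone re-statement of both rules for illustration (plain strings stand in for the real dial options and parsed target):

```go
package main

import (
	"fmt"
	"strings"
)

// escapeAuthority re-states the escaping rule from encodeAuthority above:
// unreserved bytes, sub-delims, and the authority delimiters : [ ] @ pass
// through; everything else is percent-encoded.
func escapeAuthority(s string) string {
	const hex = "0123456789ABCDEF"
	allowed := func(c byte) bool {
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			return true
		}
		switch c {
		case '-', '_', '.', '~', // unreserved
			'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', // sub-delims
			':', '[', ']', '@': // authority delimiters
			return true
		}
		return false
	}
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if c := s[i]; allowed(c) {
			out = append(out, c)
		} else {
			out = append(out, '%', hex[c>>4], hex[c&15])
		}
	}
	return string(out)
}

// pickAuthority sketches the precedence in determineAuthority above.
func pickAuthority(fromDialOption, fromCreds, target, endpoint string) string {
	switch {
	case fromDialOption != "":
		return fromDialOption
	case fromCreds != "":
		return fromCreds
	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
		return "localhost"
	case strings.HasPrefix(endpoint, ":"):
		return "localhost" + endpoint
	default:
		return escapeAuthority(endpoint)
	}
}

func main() {
	fmt.Println(escapeAuthority("a/b/c"))                               // a%2Fb%2Fc
	fmt.Println(pickAuthority("", "", "unix:///tmp/sock", "/tmp/sock")) // localhost
	fmt.Println(pickAuthority("", "", "dns:///:50051", ":50051"))       // localhost:50051
	fmt.Println(pickAuthority("", "svc.example.com", "dns:///10.0.0.1:443", "10.0.0.1:443"))
}
```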
+func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 12977654..411e3dfd 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cdc8263b..1fd0d5c1 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -77,6 +77,8 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -137,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. 
The default value for this buffer is @@ -627,6 +643,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, } } @@ -655,3 +672,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a58613..69d5580b 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -90,9 +90,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35a..0ee3d3ba 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
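dialoptions.go above gains three experimental knobs: WithSharedWriteBuffer, WithIdleTimeout, and WithRecvBufferPool backed by the new SharedBufferPool. A usage sketch, assuming a placeholder target of localhost:50051 and insecure transport credentials for brevity:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// All three options are marked EXPERIMENTAL in the hunks above and may
	// change or be removed in later grpc-go releases. The target below is a
	// placeholder.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Enter idle mode (shutting down the resolver and balancer) after 30
		// minutes without RPCs; passing zero disables the feature.
		grpc.WithIdleTimeout(30*time.Minute),
		// Reuse receive buffers to cut allocations; per the comment above it
		// is ignored when stats handlers, tracing, or binary logging are on.
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
		// Release the per-connection write buffer after each flush.
		grpc.WithSharedWriteBuffer(true),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```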
type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e..ac73c9ce 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) 
} -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be3..16928c9c 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. 
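The grpclog changes above are purely the interface{} to any migration; callers are unaffected because any is an alias for interface{}. For reference, installing a custom logger still goes through the existing NewLoggerV2 and SetLoggerV2 entry points, e.g. (call this before any other gRPC activity):

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// NewLoggerV2 and SetLoggerV2 are pre-existing grpclog entry points; only
	// the variadic parameters of the interfaces changed to `any`. Route INFO
	// to stdout and WARNING/ERROR to stderr.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))
	grpclog.Info("custom gRPC logger installed")
}
```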
Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a482..b1674d82 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40..ecfd36d7 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. 
Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func 
(g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f7..24299efd 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57..877d78fc 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. 
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62..3c594e6e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. 
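interceptor.go above switches every req/reply/srv parameter from interface{} to any; existing interceptors keep compiling since the two are aliases. A small client-side timing interceptor written against the updated signature (illustrative only; the dial setup is elided):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor logs the method name and latency of every unary RPC.
// The `any` parameters match the updated UnaryClientInterceptor signature
// above; since any aliases interface{}, this also compiles against older
// grpc-go releases.
func timingInterceptor(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	// Attach the interceptor at dial time; target and credentials are elided
	// here (see the dial-options sketch earlier in this patch).
	opt := grpc.WithUnaryInterceptor(timingInterceptor)
	log.Printf("dial option ready: %T", opt)
}
```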
It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d96..94a08d68 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). 
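The gracefulswitch hunks above stop overriding UpdateSubConnState on the wrapper and instead intercept NewSubConnOptions.StateListener, and they retire subchannels with sc.Shutdown() rather than cc.RemoveSubConn(sc). For a child LB policy the new lifecycle looks roughly like this (a sketch of the relevant calls, not a complete balancer):

```go
package main

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newSubConn shows the listener-based lifecycle the hunks above migrate to:
// connectivity updates for the SubConn arrive through StateListener instead
// of UpdateSubConnState.
func newSubConn(cc balancer.ClientConn, addr resolver.Address) (balancer.SubConn, error) {
	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
		StateListener: func(state balancer.SubConnState) {
			// React to connectivity changes for this particular SubConn here.
		},
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}

// retire shuts the SubConn down; this replaces cc.RemoveSubConn(sc), which
// the wrapper above now keeps only for older third-party balancers.
func retire(sc balancer.SubConn) {
	sc.Shutdown()
}

// The helpers above are meant to be called from a balancer.Balancer
// implementation; there is nothing to run standalone.
func main() {}
```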
-func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index af03a40d..755fdebc 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog") // Logger specifies MethodLoggers for method names with a Log call that // takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 56fcf008..0f31274a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { Log(context.Context, LogEntryConfig) } @@ -65,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } @@ -221,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -261,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c12..4399c3df 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,35 +28,38 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. 
For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any + closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -func (b *Unbounded) Get() <-chan interface{} { +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. +func (b *Unbounded) Get() <-chan any { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd7..5395e775 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. 
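buffer/unbounded.go above adds a closed flag and a Close method: Put and Load become no-ops after close, and the channel returned by Get is closed so consumers drain what is in flight and exit. Because the package is internal to grpc-go, the sketch below is a trimmed standalone copy of the pattern plus the consumer loop it expects, rather than an import:

```go
package main

import (
	"fmt"
	"sync"
)

// unbounded is a trimmed standalone copy of the pattern above: a 1-slot
// channel plus a backlog slice; close makes put and load no-ops and closes
// the read channel.
type unbounded struct {
	c       chan any
	mu      sync.Mutex
	closed  bool
	backlog []any
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan any, 1)} }

func (b *unbounded) put(t any) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	if len(b.backlog) == 0 {
		select {
		case b.c <- t:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
}

// load moves the earliest backlog item onto the channel, if there is room.
func (b *unbounded) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *unbounded) get() <-chan any { return b.c }

func (b *unbounded) close() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	b.closed = true
	close(b.c)
}

func main() {
	b := newUnbounded()
	for i := 0; i < 3; i++ {
		b.put(i)
	}
	for i := 0; i < 3; i++ {
		v := <-b.get()
		fmt.Println(v.(int)) // consumers still need a type assertion
		b.load()             // request the next buffered value
	}
	b.close()
	_, ok := <-b.get()
	fmt.Println("channel open after close:", ok) // false
}
```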
-func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2..f89e6f77 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e..1d4020f5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44..98288c3f 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc40..b5568b22 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b590..9deee7f6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 5ba9d94d..3cf10ddf 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,6 +36,16 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index 821dd0a7..dd314cfb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 3b17705b..02b4b6a1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -61,11 +61,10 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". 
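The envconfig hunk above wires three new knobs (PickFirstLBConfig, LeastRequestLB, ALTSMaxConcurrentHandshakes) through boolFromEnv and uint64FromEnv, whose bodies are not part of this diff. A minimal standalone sketch of what such helpers typically look like, using only the standard library; readBool and readUint64 are illustrative names, not the package's real implementation.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readBool returns def unless the environment variable is explicitly set to
// "true" or "false" (case-insensitive).
func readBool(envVar string, def bool) bool {
	v, ok := os.LookupEnv(envVar)
	if !ok {
		return def
	}
	switch strings.ToLower(v) {
	case "true":
		return true
	case "false":
		return false
	}
	return def
}

// readUint64 parses the environment variable as a uint64 and clamps it to
// [min, max], falling back to def when unset or unparsable.
func readUint64(envVar string, def, min, max uint64) uint64 {
	v, ok := os.LookupEnv(envVar)
	if !ok {
		return def
	}
	n, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return def
	}
	if n < min {
		return min
	}
	if n > max {
		return max
	}
	return n
}

func main() {
	fmt.Println(readBool("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false))
	fmt.Println(readUint64("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100))
}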
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -82,11 +81,15 @@ var ( XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a3..bfc45102 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
- Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42..faa998de 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. 
-func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 517ea706..aa97273e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,3 +72,24 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 79993d34..900917db 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,6 +20,7 @@ package grpcsync import ( "context" + "sync" "google.golang.org/grpc/internal/buffer" ) @@ -31,35 +32,94 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided // context will be passed to the scheduled callbacks. Users should cancel the // provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be executed once this context is canceled. +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{callbacks: buffer.NewUnbounded()} - go t.run(ctx) - return t + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. 
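The grpcrand additions above (Uint32, ExpFloat64 and a Shuffle wrapper) all follow one pattern: a single package-level math/rand source protected by a mutex so it can be shared safely across goroutines. A standalone sketch of that pattern, assuming nothing beyond the standard library; the names below are illustrative.

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	mu sync.Mutex
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// shuffle is a concurrency-safe wrapper around r.Shuffle on the shared source.
func shuffle(n int, swap func(i, j int)) {
	mu.Lock()
	defer mu.Unlock()
	r.Shuffle(n, swap)
}

func main() {
	addrs := []string{"a:443", "b:443", "c:443"}
	shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
	fmt.Println(addrs)
}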
// // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { - t.callbacks.Put(f) +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() + + if cs.closed { + return false + } + cs.callbacks.Put(f) + return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): - return - case callback := <-t.callbacks.Get(): - t.callbacks.Load() + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + cs.callbacks.Load() + default: + return backlog + } + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..aef8cec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. 
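The reworked CallbackSerializer now reports whether Schedule accepted the callback, drains any backlog after its context is cancelled, and exposes Done for shutdown synchronization. A hedged usage sketch of that contract; grpcsync is internal to the gRPC module, so this is illustrative of how the module's own code might use it rather than something external packages can import.

package grpcsync_test // illustrative; grpcsync is an internal gRPC package

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func ExampleCallbackSerializer() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in the order they were scheduled.
	for i := 0; i < 3; i++ {
		i := i
		ok := cs.Schedule(func(context.Context) { fmt.Println("callback", i) })
		_ = ok // becomes false once ctx has been cancelled
	}

	// Cancelling the context stops new Schedule calls; callbacks already
	// scheduled still run before Done is closed.
	cancel()
	<-cs.Done()
}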
+// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000..6c272476 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,301 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +// For overriding in unit tests. 
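The new PubSub replays the most recently published message to late subscribers and then delivers every subsequent Publish in order, serialized through the CallbackSerializer above. A sketch of a Subscriber and the Subscribe/Publish/Done flow; again illustrative only, since the package is internal to gRPC.

package grpcsync_test // illustrative; grpcsync is an internal gRPC package

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// connectivityWatcher implements grpcsync.Subscriber; OnMessage must not block.
type connectivityWatcher struct{}

func (connectivityWatcher) OnMessage(msg any) { fmt.Println("state:", msg) }

func ExamplePubSub() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	ps.Publish("CONNECTING")

	// The subscriber first receives the latest published message
	// ("CONNECTING"), then every message published afterwards.
	unsub := ps.Subscribe(connectivityWatcher{})
	defer unsub()

	ps.Publish("READY")

	cancel()
	<-ps.Done() // closed once all pending deliveries have run
}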
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error +} + +// Manager defines the functionality required to track RPC activity on a +// channel. +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() +} + +type noopManager struct{} + +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} + +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} + } + + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, + } + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + m.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. 
+func (m *manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. + m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + m.actuallyIdle = true + return true +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. 
+ if err := m.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if !m.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 836b6a3b..c8a8c76d 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. 
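The idle manager above is driven by whatever owns the channel: that owner implements Enforcer, constructs a Manager with its idle timeout, and brackets each RPC with OnCallBegin and OnCallEnd. A minimal wiring sketch with a stub Enforcer standing in for grpc.ClientConn; the package is internal, so this is illustrative only, and grpclog.Component is used here merely as a convenient LoggerV2.

package idle_test // illustrative; idle is an internal gRPC package

import (
	"fmt"
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle"
)

// stubEnforcer stands in for the grpc.ClientConn side of the contract.
type stubEnforcer struct{}

func (stubEnforcer) EnterIdleMode() error { fmt.Println("entering idle"); return nil }
func (stubEnforcer) ExitIdleMode() error  { fmt.Println("exiting idle"); return nil }

func ExampleManager() {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: stubEnforcer{},
		Timeout:  30 * time.Minute, // a Timeout of 0 disables idleness tracking
		Logger:   grpclog.Component("idle-example"),
	})
	defer m.Close()

	// Every RPC is bracketed by OnCallBegin/OnCallEnd; OnCallBegin fails only
	// if the channel could not be brought out of idle mode.
	if err := m.OnCallBegin(); err != nil {
		fmt.Println("RPC blocked:", err)
		return
	}
	// ... issue the RPC ...
	m.OnCallEnd()
}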
- ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,54 +53,81 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - CanonicalString interface{} // func (codes.Code) string + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). 
- DisableGlobalDialOptions interface{} // func() grpc.DialOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -107,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -139,7 +170,11 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
@@ -150,7 +185,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e..900bfb71 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b..70331913 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a94..f0603871 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f3..99e1e5b3 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." 
// In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. @@ -86,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. 
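The rename from customAuthorityDialler/customAuthorityResolver to addressDialer/newNetResolver keeps the same mechanism: a net.Resolver whose Dial callback ignores the address Go would normally pick and dials the user-supplied DNS authority instead. A standalone sketch of that mechanism using only the standard library; the 8.8.8.8:53 authority is just a placeholder value.

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// resolverFor returns a *net.Resolver that sends every lookup to the given
// DNS authority ("host:port") instead of the system default.
func resolverFor(authority string) *net.Resolver {
	return &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, authority)
		},
	}
}

func main() {
	r := resolverFor("8.8.8.8:53") // placeholder authority
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	addrs, err := r.LookupHost(ctx, "grpc.io")
	fmt.Println(addrs, err)
}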
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000..11d82afc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f5..4cf85cad 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
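The Duration type above speaks the protobuf JSON form for durations: a decimal count of seconds with up to nine fractional digits and a trailing "s", clamped to what time.Duration can represent. A small round-trip sketch of that wire format; serviceconfig here is internal to gRPC, so the import is illustrative and external code would normally hit this behaviour only through a service config.

package serviceconfig_test // illustrative; serviceconfig is gRPC-internal

import (
	"encoding/json"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/serviceconfig"
)

func ExampleDuration() {
	// Marshalling trims trailing zero groups, so 1.5s encodes as "1.500s"
	// and a whole number of seconds would encode as plain "2s".
	b, _ := json.Marshal(serviceconfig.Duration(1500 * time.Millisecond))
	fmt.Println(string(b)) // "1.500s"

	var d serviceconfig.Duration
	_ = json.Unmarshal([]byte(`"0.000000001s"`), &d)
	fmt.Println(time.Duration(d)) // 1ns
}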
-func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81..b330cced 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fbee581b..98f80e3f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -453,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5216998a..badab8ac 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1337,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1505,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1521,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1554,9 +1552,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { statusGen = status.New(rawStatusCode, grpcMessage) } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. 
+ rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 4b406b8c..c06db679 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -165,21 +165,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -238,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -855,7 +850,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -939,7 +934,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1166,12 +1161,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. 
- t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1318,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1367,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f..19581400 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,6 +30,7 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" @@ -309,6 +310,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +318,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +339,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +403,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if 
readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1b7d7fab..74a811fc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -726,7 +721,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -736,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. 
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index c525dc07..236837f4 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,26 +28,36 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is sage + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -63,10 +73,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { // - wraps the done function in the passed in result to increment the calls // failed or calls succeeded channelz counter before invoking the actual // done function. -func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() done := result.Done result.Done = func(b balancer.DoneInfo) { @@ -92,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -126,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() @@ -152,14 +175,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acw, &pickResult) + doneChannelzWrapper(acbw, &pickResult) return t, pickResult, nil } return t, pickResult, nil @@ -187,6 +210,25 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. type dropError struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fc91b4d2..2e9cf66b 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,15 +19,25 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -36,22 +46,55 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. 
+ if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -69,28 +112,49 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -109,17 +173,22 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +201,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +226,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd455478..73bd6336 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3e..804be887 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. 
-func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6215e5ef..11384e22 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -124,34 +102,46 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. 
When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -180,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -244,20 +260,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. 
Any query params specified in the original dial @@ -311,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 05a9d4e0..d6833056 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,200 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. return nil } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +243,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a..b7723aa0 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 76d152a6..eeae92fb 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,26 +99,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream + mdata any } // Server is a gRPC server to serve RPC requests. 
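Most of the grpc-go churn in this bump is mechanical: exported and internal signatures move from interface{} to its alias any. Because any has been a predeclared alias for interface{} since Go 1.18, the change is source-compatible and carries no behavioral difference. A minimal, hypothetical sketch (not part of this diff) showing the two are interchangeable in the shape of the updated methodHandler:

    package main

    import "fmt"

    // handler mirrors the new methodHandler style: it takes and returns any,
    // which names exactly the same type as interface{}.
    func handler(req any) (any, error) {
        return fmt.Sprintf("echo: %v", req), nil
    }

    func main() {
        var legacy interface{} = "ping" // an interface{} value...
        resp, _ := handler(legacy)      // ...is accepted wherever any is expected
        fmt.Println(resp)
    }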
@@ -145,7 +139,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -170,18 +164,22 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -233,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -273,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -387,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -552,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -567,24 +603,19 @@ const serverWorkerResetThreshold = 1 << 16 // [1] https://github.com/golang/go/issues/18138 func (s *Server) serverWorker() { for completed := 0; completed < serverWorkerResetThreshold; completed++ { - data, ok := <-s.serverWorkerChannel + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleSingleStream(data) + f() } go s.serverWorker() } -func (s *Server) handleSingleStream(data *serverWorkerData) { - defer data.wg.Done() - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) -} - // initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan *serverWorkerData) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } @@ -632,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -640,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -655,14 +686,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -673,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -714,7 +745,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. 
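The server options touched above — the new SharedWriteBuffer and RecvBufferPool, the MaxConcurrentStreams zero-means-unlimited normalization, and the keepalive minimum now read from internal.KeepaliveMinServerPingTime — are all wired through grpc.NewServer. A hedged usage sketch follows; the listen address and option values are arbitrary, and per the doc comment above, the shared receive buffer pool is ignored when StatsHandler, EnableTracing, or binary logging is in use.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // address is arbitrary
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(
		// 0 is now normalized to math.MaxUint32 (no stream limit) inside the option.
		grpc.MaxConcurrentStreams(0),
		// Experimental options; SharedWriteBuffer and RecvBufferPool are new in this bump.
		grpc.SharedWriteBuffer(true),
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()),
		grpc.NumStreamWorkers(8),
		// A Time below the enforced minimum is raised to 1s with a warning.
		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 500 * time.Millisecond}),
	)
	// Register services here, then serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}

With this bump MaxConcurrentStreams is also enforced in the gRPC layer itself, via the handler quota semaphore added further down in server.go, and the client side plumbs the same receive pool through its dial options (the dopts.recvBufferPool references later in stream.go), presumably via a matching WithRecvBufferPool dial option.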
@@ -895,7 +926,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -915,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -943,21 +975,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- data: + case s.serverWorkerChannel <- f: return default: // If all stream workers are busy, fallback to the default code path. } } - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1046,7 +1083,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1096,7 +1133,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1143,7 +1180,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1152,7 +1189,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1189,7 +1226,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1296,7 +1333,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1306,7 +1343,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1470,7 +1507,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1479,7 +1516,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1506,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1520,7 +1557,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1623,7 +1660,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1689,13 +1726,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str pos := strings.LastIndex(sm, "/") if pos == -1 { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil 
{ - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1736,7 +1773,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1856,7 +1893,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true @@ -2052,3 +2089,34 @@ func validateSendCompressor(name, clientCompressors string) error { } return fmt.Errorf("client does not support compressor %q", name) } + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index f22acace..0df11fc0 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 00000000..48a64cfe --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. 
+type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b..4ab70e2d 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 53910fb7..a93360ef 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, or if err wraps a type satisfying this, the appropriate Status is -// returned. For wrapped errors, the message returned contains the entire -// err.Error() text and not just the wrapped status. +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. // // - Otherwise, err is an error not compatible with this package. 
In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -92,11 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - return gs.GRPCStatus(), true + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - p := gs.GRPCStatus().Proto() + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d1226a41..b14b2fbe 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -123,7 +126,10 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. 
Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -132,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -168,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -425,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -469,7 +485,7 @@ func (a *csAttempt) newStream() error { // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metatada != nil { + if a.pickResult.Metadata != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -479,7 +495,7 @@ func (a *csAttempt) newStream() error { // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metatada) + md = metadata.Join(md, a.pickResult.Metadata) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } @@ -499,7 +515,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -780,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. 
Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. logEntry := &binarylog.ServerHeader{ @@ -812,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -852,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -896,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -920,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -993,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1020,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1047,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1262,17 +1274,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1335,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1380,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1499,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. 
On // any non-EOF error, the stream is aborted and the error contains the @@ -1508,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. @@ -1589,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1597,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1664,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1672,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1744,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b..9ded7932 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 853ce0e3..724ad210 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.55.0" +const Version = "1.58.3" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b..bbc9e2e3 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -84,6 +84,9 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -106,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +171,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index d09d22e1..66b95870 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -106,13 +106,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -120,7 +126,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -128,7 +134,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case the output in an empty JSON object. 
if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index ebf6c652..722a7b41 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case there is nothing to output. if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go index fbdf3487..934f2dcb 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -41,8 +41,10 @@ type Encoder struct { // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. -func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. 
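Both protojson and prototext gain a MarshalAppend that threads a caller-supplied buffer down into the internal encoders (note NewEncoder now accepts a buf []byte). A small sketch of the append-style API, using a wrapperspb message purely as a stand-in:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	buf := make([]byte, 0, 64) // one backing array reused across marshals
	for _, v := range []int32{1, 2, 3} {
		buf = buf[:0]
		var err error
		buf, err = protojson.MarshalOptions{}.MarshalAppend(buf, wrapperspb.Int32(v))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(buf)) // each wrapper renders as its bare JSON value
	}
}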
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index da289ccc..cf7aed77 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -53,8 +53,10 @@ type encoderState struct { // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 5c0e8f73..136f1b21 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -183,13 +183,58 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. 
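These generated-name constants track the new ExtensionRangeOptions declaration and verification fields that descriptorpb picks up at the bottom of this diff. As a rough sketch of the corresponding Go surface (field and enum names as generated in protobuf-go v1.31; the values are invented for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Declares which extension numbers/types are allowed in an extension range.
	opts := &descriptorpb.ExtensionRangeOptions{
		Declaration: []*descriptorpb.ExtensionRangeOptions_Declaration{{
			Number:   proto.Int32(1000),
			FullName: proto.String(".example.my_extension"),
			Type:     proto.String(".example.MyType"),
		}},
		Verification: descriptorpb.ExtensionRangeOptions_DECLARATION.Enum(),
	}
	fmt.Println(opts)
}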
@@ -540,6 +585,7 @@ const ( FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -552,6 +598,7 @@ const ( FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -567,6 +614,7 @@ const ( FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 3bc71013..e0f75fea 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -32,6 +32,7 @@ const ( Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ const ( Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ const ( Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. 
@@ -121,12 +124,14 @@ const ( Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ const ( Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 33745ed0..dea522e1 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -33,7 +33,7 @@ var ( return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index f7014cd5..0999f29d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. 
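The internal/order/order.go change above tightens the oneof guard from a plain nil check to inOneof, so fields in synthetic oneofs (proto3 optional fields) now fall through to ordering by field number instead of by oneof declaration index. A self-contained sketch of that comparator logic using hypothetical stand-in types rather than the real protoreflect descriptors (extension handling omitted):

package main

import "fmt"

// Hypothetical stand-ins for the relevant parts of protoreflect descriptors.
type oneof struct {
	index     int
	synthetic bool // proto3 optional fields live in synthetic oneofs
}

type field struct {
	number int
	oneof  *oneof // nil when the field is not in any oneof
}

func less(x, y field) bool {
	inOneof := func(o *oneof) bool { return o != nil && !o.synthetic }

	// Fields not within a (real) oneof sort before those within one.
	if inOneof(x.oneof) != inOneof(y.oneof) {
		return !inOneof(x.oneof) && inOneof(y.oneof)
	}
	// Before the fix this guard was `x.oneof != nil && y.oneof != nil`,
	// which also ordered synthetic oneofs by declaration index; now only
	// real oneofs are compared by index.
	if inOneof(x.oneof) && inOneof(y.oneof) && x.oneof != y.oneof {
		return x.oneof.index < y.oneof.index
	}
	// Otherwise fall back to field number.
	return x.number < y.number
}

func main() {
	a := field{number: 1, oneof: &oneof{index: 1, synthetic: true}}
	b := field{number: 2, oneof: &oneof{index: 0, synthetic: true}}
	// With the fix, two proto3 optional fields compare by field number,
	// so a (field 1) sorts before b (field 2); the old guard would have
	// compared their synthetic oneof indices instead.
	fmt.Println(less(a, b)) // true
}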
const ( Major = 1 - Minor = 30 + Minor = 31 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 554b9c6c..f1692b49 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 54ce326d..717b106f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -363,6 +363,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "retention", nil) case 18: b = p.appendSingularField(b, "target", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -418,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -473,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index dac5671d..04c00f73 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,64 @@ import ( sync "sync" ) +// The verification 
state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. +func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -137,11 +195,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -197,11 +255,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +316,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } 
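In the proto/size.go hunk earlier in this diff, sizeList and sizeMap now compute protowire.SizeTag(num) once per field rather than once per element. The same sizing arithmetic can be reproduced with the public protowire package; the field number and values below are illustrative:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const num = protowire.Number(5)
	values := []int32{1, 150, 300000}

	// The tag size depends only on the field number, so it can be hoisted
	// out of the per-element loop, which is what the size.go change does.
	sizeTag := protowire.SizeTag(num)

	// Unpacked encoding: one tag per element.
	unpacked := 0
	for _, v := range values {
		unpacked += sizeTag + protowire.SizeVarint(uint64(v))
	}

	// Packed encoding: one tag plus a length-delimited payload.
	content := 0
	for _, v := range values {
		content += protowire.SizeVarint(uint64(v))
	}
	packed := sizeTag + protowire.SizeBytes(content)

	fmt.Println(unpacked, packed)
}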
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +346,13 @@ type FieldOptions_CType int32 const ( // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +382,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -380,11 +444,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -442,11 +506,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -526,11 +590,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -588,11 +652,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -652,11 +716,11 @@ 
func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1015,7 +1079,21 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // The verification state of the range. + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} @@ -1056,6 +1134,20 @@ func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. type FieldDescriptorProto struct { state protoimpl.MessageState @@ -2046,8 +2138,10 @@ type FieldOptions struct { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2111,9 +2205,11 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. 
when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2224,6 +2320,7 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { if x != nil && x.Target != nil { return *x.Target @@ -2231,6 +2328,13 @@ func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { return FieldOptions_TARGET_TYPE_UNKNOWN } +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2960,6 +3064,108 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. 
+ Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. +func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. 
// @@ -2978,7 +3184,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2991,7 +3197,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3038,7 +3244,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3051,7 +3257,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3182,7 +3388,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3401,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3269,7 +3475,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3282,7 +3488,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,264 +3642,296 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 
0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, - 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 
0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, - 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, - 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, - 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, - 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, - 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, - 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 
0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 
0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 
0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 
0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, - 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, - 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 
0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, - 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, - 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, - 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, - 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, - 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, - 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 
0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, - 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, - 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, - 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, - 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, - 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, - 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 
0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 
0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, @@ -3885,98 +4123,103 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 19: google.protobuf.FileOptions - (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 35: 
google.protobuf.GeneratedCodeInfo.Annotation + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 13, // 4: 
google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 37: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 46, // [46:46] is the sub-list for method output_type - 46, // [46:46] is the sub-list for method input_type - 46, // [46:46] is the sub-list for extension type_name - 46, // [46:46] is the sub-list for extension extendee - 0, // [0:46] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: 
google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4280,7 +4523,7 @@ func 
file_google_protobuf_descriptor_proto_init() {
 }
 }
 file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
+ switch v := v.(*ExtensionRangeOptions_Declaration); i {
 case 0:
 return &v.state
 case 1:
@@ -4292,7 +4535,7 @@ func file_google_protobuf_descriptor_proto_init() {
 }
 }
 file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UninterpretedOption_NamePart); i {
+ switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
 case 0:
 return &v.state
 case 1:
@@ -4304,7 +4547,7 @@ func file_google_protobuf_descriptor_proto_init() {
 }
 }
 file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceCodeInfo_Location); i {
+ switch v := v.(*UninterpretedOption_NamePart); i {
 case 0:
 return &v.state
 case 1:
@@ -4316,6 +4559,18 @@ func file_google_protobuf_descriptor_proto_init() {
 }
 }
 file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceCodeInfo_Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
 switch v := v.(*GeneratedCodeInfo_Annotation); i {
 case 0:
 return &v.state
@@ -4333,8 +4588,8 @@ func file_google_protobuf_descriptor_proto_init() {
 File: protoimpl.DescBuilder{
 GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 9,
- NumMessages: 27,
+ NumEnums: 10,
+ NumMessages: 28,
 NumExtensions: 0,
 NumServices: 0,
 },
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index a6c7a33f..580b232f 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -142,39 +142,39 @@ import (
 //
 // Example 2: Pack and unpack a message in Java.
 //
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-// // or ...
-// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
-// foo = any.unpack(Foo.getDefaultInstance());
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := anypb.New(foo)
-// if err != nil {
-// ...
-// }
-// ...
-// foo := &pb.Foo{}
-// if err := any.UnmarshalTo(foo); err != nil {
-// ...
-// }
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+// // or ...
+// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+// foo = any.unpack(Foo.getDefaultInstance());
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := anypb.New(foo)
+// if err != nil {
+// ...
+// }
+// ...
+// foo := &pb.Foo{}
+// if err := any.UnmarshalTo(foo); err != nil {
+// ...
+// }
 //
 // The pack methods provided by protobuf library will by default use
 // 'type.googleapis.com/full.type.name' as the type URL and the unpack
@@ -182,8 +182,8 @@ import (
 // in the type URL, for example "foo.bar.com/x/y.z" will yield type
 // name "y.z".
 //
-// # JSON
-//
+// JSON
+// ====
 // The JSON representation of an `Any` value uses the regular
 // representation of the deserialized, embedded message, with an
 // additional field `@type` which contains the type URL. Example:
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 61f69fc1..81511a33 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -167,7 +167,7 @@ import (
 // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
 // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
 // the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 // ) to obtain a formatter capable of generating timestamps in this format.
 type Timestamp struct {
 state protoimpl.MessageState
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f013a495..33ea2791 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,10 +1,10 @@
 # dario.cat/mergo v1.0.0
 ## explicit; go 1.13
 dario.cat/mergo
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
-## explicit; go 1.18
+# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
+## explicit; go 1.20
 github.com/AdaLogics/go-fuzz-headers
-# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652
+# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0
 ## explicit; go 1.18
 github.com/AdamKorcz/go-118-fuzz-build/testing
 # github.com/CycloneDX/cyclonedx-go v0.7.2
@@ -13,18 +13,9 @@ github.com/CycloneDX/cyclonedx-go
 # github.com/DataDog/zstd v1.4.5
 ## explicit
 github.com/DataDog/zstd
-# github.com/Masterminds/goutils v1.1.1
-## explicit
-github.com/Masterminds/goutils
 # github.com/Masterminds/semver v1.5.0
 ## explicit
 github.com/Masterminds/semver
-# github.com/Masterminds/semver/v3 v3.2.0
-## explicit; go 1.18
-github.com/Masterminds/semver/v3
-# github.com/Masterminds/sprig/v3 v3.2.3
-## explicit; go 1.13
-github.com/Masterminds/sprig/v3
 # github.com/Microsoft/go-winio v0.6.1
 ## explicit; go 1.17
 github.com/Microsoft/go-winio
@@ -32,10 +23,11 @@ github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/backuptar
 github.com/Microsoft/go-winio/internal/fs
 github.com/Microsoft/go-winio/internal/socket
 github.com/Microsoft/go-winio/internal/stringbuffer
+github.com/Microsoft/go-winio/pkg/bindfilter
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/tools/mkwinsyscall
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.10.0-rc.7
+# github.com/Microsoft/hcsshim v0.11.1
 ## explicit; go 1.18
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/computestorage
@@ -114,7 +106,7 @@ github.com/anchore/go-struct-converter
 # github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501
 ## explicit; go 1.17
 github.com/anchore/packageurl-go
-# github.com/anchore/stereoscope v0.0.0-20230925132944-bf05af58eb44
+# github.com/anchore/stereoscope v0.0.0-20231117203853-3610f4ef3e83
 ## explicit; go 1.19
 github.com/anchore/stereoscope
 github.com/anchore/stereoscope/internal/bus
@@ -133,7 +125,7 @@ github.com/anchore/stereoscope/pkg/image/oci
 github.com/anchore/stereoscope/pkg/image/sif
 github.com/anchore/stereoscope/pkg/tree
 github.com/anchore/stereoscope/pkg/tree/node
-# github.com/anchore/syft v0.94.0
+# github.com/anchore/syft v0.97.1
 ## explicit; go 1.21.0
 github.com/anchore/syft/internal
 github.com/anchore/syft/internal/bus
@@ -148,40 +140,40 @@ github.com/anchore/syft/syft/cpe
 github.com/anchore/syft/syft/event
 github.com/anchore/syft/syft/event/monitor
 github.com/anchore/syft/syft/file
-github.com/anchore/syft/syft/formats
-github.com/anchore/syft/syft/formats/common
-github.com/anchore/syft/syft/formats/common/cyclonedxhelpers
-github.com/anchore/syft/syft/formats/common/spdxhelpers
-github.com/anchore/syft/syft/formats/common/util
-github.com/anchore/syft/syft/formats/cyclonedxjson
-github.com/anchore/syft/syft/formats/cyclonedxxml
-github.com/anchore/syft/syft/formats/github
-github.com/anchore/syft/syft/formats/spdxjson
-github.com/anchore/syft/syft/formats/spdxtagvalue
-github.com/anchore/syft/syft/formats/syftjson
-github.com/anchore/syft/syft/formats/syftjson/model
-github.com/anchore/syft/syft/formats/table
-github.com/anchore/syft/syft/formats/template
-github.com/anchore/syft/syft/formats/text
+github.com/anchore/syft/syft/format
+github.com/anchore/syft/syft/format/common
+github.com/anchore/syft/syft/format/common/cyclonedxhelpers
+github.com/anchore/syft/syft/format/common/spdxhelpers
+github.com/anchore/syft/syft/format/common/util
+github.com/anchore/syft/syft/format/cyclonedxjson
+github.com/anchore/syft/syft/format/cyclonedxxml
+github.com/anchore/syft/syft/format/internal/cyclonedxutil
+github.com/anchore/syft/syft/format/internal/spdxutil
+github.com/anchore/syft/syft/format/spdxjson
+github.com/anchore/syft/syft/format/spdxtagvalue
+github.com/anchore/syft/syft/format/syftjson
+github.com/anchore/syft/syft/format/syftjson/model
 github.com/anchore/syft/syft/internal/fileresolver
+github.com/anchore/syft/syft/internal/packagemetadata
 github.com/anchore/syft/syft/internal/sourcemetadata
 github.com/anchore/syft/syft/internal/windows
 github.com/anchore/syft/syft/license
 github.com/anchore/syft/syft/linux
 github.com/anchore/syft/syft/pkg
 github.com/anchore/syft/syft/pkg/cataloger
-github.com/anchore/syft/syft/pkg/cataloger/alpm
-github.com/anchore/syft/syft/pkg/cataloger/apkdb
+github.com/anchore/syft/syft/pkg/cataloger/alpine
+github.com/anchore/syft/syft/pkg/cataloger/arch
 github.com/anchore/syft/syft/pkg/cataloger/binary
 github.com/anchore/syft/syft/pkg/cataloger/common/cpe
 github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary
 github.com/anchore/syft/syft/pkg/cataloger/cpp
 github.com/anchore/syft/syft/pkg/cataloger/dart
-github.com/anchore/syft/syft/pkg/cataloger/deb
+github.com/anchore/syft/syft/pkg/cataloger/debian
 github.com/anchore/syft/syft/pkg/cataloger/dotnet
 github.com/anchore/syft/syft/pkg/cataloger/elixir
 github.com/anchore/syft/syft/pkg/cataloger/erlang
 github.com/anchore/syft/syft/pkg/cataloger/generic
+github.com/anchore/syft/syft/pkg/cataloger/gentoo
 github.com/anchore/syft/syft/pkg/cataloger/githubactions
 github.com/anchore/syft/syft/pkg/cataloger/golang
 github.com/anchore/syft/syft/pkg/cataloger/golang/internal/xcoff
@@ -192,10 +184,9 @@ github.com/anchore/syft/syft/pkg/cataloger/javascript
 github.com/anchore/syft/syft/pkg/cataloger/kernel
 github.com/anchore/syft/syft/pkg/cataloger/nix
 github.com/anchore/syft/syft/pkg/cataloger/php
-github.com/anchore/syft/syft/pkg/cataloger/portage
 github.com/anchore/syft/syft/pkg/cataloger/python
 github.com/anchore/syft/syft/pkg/cataloger/r
-github.com/anchore/syft/syft/pkg/cataloger/rpm
+github.com/anchore/syft/syft/pkg/cataloger/redhat
 github.com/anchore/syft/syft/pkg/cataloger/ruby
 github.com/anchore/syft/syft/pkg/cataloger/rust
 github.com/anchore/syft/syft/pkg/cataloger/sbom
@@ -214,7 +205,7 @@ github.com/aquasecurity/go-version/pkg/part
 # github.com/becheran/wildmatch-go v1.0.0
 ## explicit; go 1.15
 github.com/becheran/wildmatch-go
-# github.com/bmatcuk/doublestar/v4 v4.6.0
+# github.com/bmatcuk/doublestar/v4 v4.6.1
 ## explicit; go 1.16
 github.com/bmatcuk/doublestar/v4
 # github.com/cloudflare/circl v1.3.3
@@ -234,7 +225,7 @@ github.com/cloudflare/circl/sign/ed448
 # github.com/containerd/cgroups v1.1.0
 ## explicit; go 1.17
 github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.7.0
+# github.com/containerd/containerd v1.7.8
 ## explicit; go 1.19
 github.com/containerd/containerd
 github.com/containerd/containerd/api/runtime/sandbox/v1
@@ -265,6 +256,7 @@ github.com/containerd/containerd/content/local
 github.com/containerd/containerd/content/proxy
 github.com/containerd/containerd/defaults
 github.com/containerd/containerd/diff
+github.com/containerd/containerd/diff/proxy
 github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/events
 github.com/containerd/containerd/events/exchange
@@ -316,21 +308,24 @@ github.com/containerd/containerd/snapshots
 github.com/containerd/containerd/snapshots/proxy
 github.com/containerd/containerd/tracing
 github.com/containerd/containerd/version
-# github.com/containerd/continuity v0.3.0
-## explicit; go 1.17
+# github.com/containerd/continuity v0.4.2
+## explicit; go 1.19
 github.com/containerd/continuity/fs
 github.com/containerd/continuity/sysx
 # github.com/containerd/fifo v1.1.0
 ## explicit; go 1.18
 github.com/containerd/fifo
+# github.com/containerd/log v0.1.0
+## explicit; go 1.20
+github.com/containerd/log
 # github.com/containerd/stargz-snapshotter/estargz v0.14.3
 ## explicit; go 1.19
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/ttrpc v1.2.1
+# github.com/containerd/ttrpc v1.2.2
 ## explicit; go 1.13
 github.com/containerd/ttrpc
-# github.com/containerd/typeurl/v2 v2.1.0
+# github.com/containerd/typeurl/v2 v2.1.1
 ## explicit; go 1.13
 github.com/containerd/typeurl/v2
 # github.com/cyphar/filepath-securejoin v0.2.4
@@ -357,7 +352,7 @@ github.com/docker/cli/cli/connhelper/ssh
 ## explicit
 github.com/docker/distribution/reference
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.6+incompatible
+# github.com/docker/docker v24.0.7+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -447,7 +442,7 @@ github.com/go-git/go-billy/v5/helper/polyfill
 github.com/go-git/go-billy/v5/memfs
 github.com/go-git/go-billy/v5/osfs
 github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.9.0
+# github.com/go-git/go-git/v5 v5.10.0
 ## explicit; go 1.19
 github.com/go-git/go-git/v5
 github.com/go-git/go-git/v5/config
@@ -564,7 +559,7 @@ github.com/google/licensecheck/internal/match
 # github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26
 ## explicit; go 1.18
 github.com/google/pprof/profile
-# github.com/google/uuid v1.3.1
+# github.com/google/uuid v1.4.0
 ## explicit
 github.com/google/uuid
 # github.com/gookit/color v1.5.4
@@ -588,15 +583,9 @@ github.com/hashicorp/hcl/hcl/token
 github.com/hashicorp/hcl/json/parser
 github.com/hashicorp/hcl/json/scanner
 github.com/hashicorp/hcl/json/token
-# github.com/huandu/xstrings v1.3.3
-## explicit; go 1.12
-github.com/huandu/xstrings
 # github.com/iancoleman/strcase v0.3.0
 ## explicit; go 1.16
 github.com/iancoleman/strcase
-# github.com/imdario/mergo v0.3.15
-## explicit; go 1.13
-github.com/imdario/mergo
 # github.com/in-toto/in-toto-golang v0.9.0
 ## explicit; go 1.20
 github.com/in-toto/in-toto-golang/in_toto
@@ -651,9 +640,6 @@ github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.18
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/mattn/go-runewidth v0.0.15
-## explicit; go 1.9
-github.com/mattn/go-runewidth
 # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
 ## explicit
 github.com/mgutz/ansi
@@ -663,9 +649,6 @@ github.com/mholt/archiver/v3
 # github.com/microsoft/go-rustaudit v0.0.0-20220730194248-4b17361d90a5
 ## explicit; go 1.18
 github.com/microsoft/go-rustaudit
-# github.com/mitchellh/copystructure v1.2.0
-## explicit; go 1.15
-github.com/mitchellh/copystructure
 # github.com/mitchellh/go-homedir v1.1.0
 ## explicit
 github.com/mitchellh/go-homedir
@@ -675,9 +658,6 @@ github.com/mitchellh/hashstructure/v2
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
-# github.com/mitchellh/reflectwalk v1.0.2
-## explicit
-github.com/mitchellh/reflectwalk
 # github.com/moby/locker v1.0.1
 ## explicit; go 1.13
 github.com/moby/locker
@@ -693,9 +673,6 @@ github.com/moby/sys/signal
 # github.com/nwaples/rardecode v1.1.0
 ## explicit
 github.com/nwaples/rardecode
-# github.com/olekukonko/tablewriter v0.0.5
-## explicit; go 1.12
-github.com/olekukonko/tablewriter
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
@@ -747,9 +724,6 @@ github.com/pkg/errors
 # github.com/pkg/profile v1.7.0
 ## explicit; go 1.13
 github.com/pkg/profile
-# github.com/rivo/uniseg v0.2.0
-## explicit; go 1.12
-github.com/rivo/uniseg
 # github.com/saferwall/pe v1.4.7
 ## explicit; go 1.15
 github.com/saferwall/pe
@@ -777,9 +751,6 @@ github.com/sergi/go-diff/diffmatchpatch
 # github.com/shibumi/go-pathspec v1.3.0
 ## explicit; go 1.17
 github.com/shibumi/go-pathspec
-# github.com/shopspring/decimal v1.2.0
-## explicit; go 1.13
-github.com/shopspring/decimal
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
@@ -812,7 +783,7 @@ github.com/spf13/afero/mem
 # github.com/spf13/cast v1.5.1
 ## explicit; go 1.18
 github.com/spf13/cast
-# github.com/spf13/cobra v1.7.0
+# github.com/spf13/cobra v1.8.0
 ## explicit; go 1.15
 github.com/spf13/cobra
 # github.com/spf13/jwalterweatherman v1.1.0
@@ -911,10 +882,9 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
 go.opentelemetry.io/otel/trace
 # go.uber.org/goleak v1.2.0
 ## explicit; go 1.18
-# golang.org/x/crypto v0.14.0
-## explicit; go 1.17
+# golang.org/x/crypto v0.15.0
+## explicit; go 1.18
 golang.org/x/crypto/argon2
-golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
@@ -930,22 +900,20 @@ golang.org/x/crypto/openpgp/elgamal
 golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
-golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/ripemd160
-golang.org/x/crypto/scrypt
 golang.org/x/crypto/sha3
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/mod v0.13.0
+# golang.org/x/mod v0.14.0
 ## explicit; go 1.18
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.17.0
-## explicit; go 1.17
+# golang.org/x/net v0.18.0
+## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html
 golang.org/x/net/html/atom
@@ -962,18 +930,18 @@ golang.org/x/net/trace
 ## explicit; go 1.17
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.13.0
-## explicit; go 1.17
+# golang.org/x/sys v0.14.0
+## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.13.0
-## explicit; go 1.17
+# golang.org/x/term v0.14.0
+## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.13.0
-## explicit; go 1.17
+# golang.org/x/text v0.14.0
+## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
 golang.org/x/text/encoding/htmlindex
@@ -1017,13 +985,15 @@ golang.org/x/tools/internal/typesinternal
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+# google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98
+## explicit; go 1.19
+google.golang.org/genproto/protobuf/field_mask
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/status
-google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.55.0
-## explicit; go 1.17
+# google.golang.org/grpc v1.58.3
+## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -1054,6 +1024,7 @@ google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/idle
 google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
@@ -1073,7 +1044,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.30.0
+# google.golang.org/protobuf v1.31.0
 ## explicit; go 1.11
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext